base_camera.py
import time
import threading
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera
import pytesseract
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
class CameraEvent(object):
"""An Event-like class that signals all active clients when a new frame is
available.
"""
def __init__(self):
self.events = {}
def wait(self):
"""Invoked from each client's thread to wait for the next frame."""
ident = get_ident()
if ident not in self.events:
# this is a new client
# add an entry for it in the self.events dict
# each entry has two elements, a threading.Event() and a timestamp
self.events[ident] = [threading.Event(), time.time()]
return self.events[ident][0].wait()
def set(self):
"""Invoked by the camera thread when a new frame is available."""
now = time.time()
remove = None
for ident, event in self.events.items():
if not event[0].is_set():
# if this client's event is not set, then set it
# also update the last set timestamp to now
event[0].set()
event[1] = now
else:
# if the client's event is already set, it means the client
# did not process a previous frame
# if the event stays set for more than 5 seconds, then assume
# the client is gone and remove it
if now - event[1] > 5:
remove = ident
if remove:
del self.events[remove]
def clear(self):
"""Invoked from each client's thread after a frame was processed."""
self.events[get_ident()][0].clear()
class BaseCamera(object):
thread = None # background thread that reads frames from camera
frame = None # current frame is stored here by background thread
last_access = 0 # time of last client access to the camera
event = CameraEvent()
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
def __init__(self):
"""Start the background camera thread if it isn't running yet."""
if BaseCamera.thread is None:
BaseCamera.last_access = time.time()
# start background frame thread
BaseCamera.thread = threading.Thread(target=self._thread)
BaseCamera.thread.start()
# wait until frames are available
while self.get_frame() is None:
time.sleep(0)
def get_frame(self):
"""Return the current camera frame."""
BaseCamera.last_access = time.time()
# wait for a signal from the camera thread
BaseCamera.event.wait()
BaseCamera.event.clear()
return BaseCamera.frame
@staticmethod
def frames():
""""Generator that returns frames from the camera."""
raise RuntimeError('Must be implemented by subclasses.')
@classmethod
def _thread(cls):
"""Camera background thread."""
print('Starting camera thread.')
frames_iterator = cls.frames()
for frame in frames_iterator:
BaseCamera.frame = frame
BaseCamera.event.set() # send signal to clients
time.sleep(0)
# if no clients have asked for frames in the last
# 10 seconds then stop the thread
if time.time() - BaseCamera.last_access > 10:
frames_iterator.close()
print('Stopping camera thread due to inactivity.')
break
BaseCamera.thread = None
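# --- Example (not part of the original file): a minimal sketch of a concrete
# subclass, assuming the usual flask-video-streaming pattern where frames()
# yields JPEG-encoded bytes. It reuses the class-level PiCamera/PiRGBArray
# objects defined on BaseCamera; the name `Camera` and the use of OpenCV for
# JPEG encoding are assumptions, not part of the file above.
import cv2  # assumed available; used only to JPEG-encode the raw frame

class Camera(BaseCamera):
    @staticmethod
    def frames():
        for capture in BaseCamera.camera.capture_continuous(
                BaseCamera.rawCapture, format='bgr', use_video_port=True):
            frame = capture.array
            # encode the BGR array as JPEG so clients can stream it directly
            yield cv2.imencode('.jpg', frame)[1].tobytes()
            # reset the capture buffer before the next frame
            BaseCamera.rawCapture.truncate(0)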
agent.py
#
# Copyright 2021 Mobvista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
from datetime import datetime
import threading
import concurrent.futures
import traceback
import json
import os
from ._mindalpha import ActorConfig
from ._mindalpha import NodeRole
from ._mindalpha import Message
from ._mindalpha import PSRunner
from ._mindalpha import PSDefaultAgent
from .metric import ModelMetric
from .network_utils import get_available_endpoint
from .url_utils import use_s3a
class Agent(object):
_instances = dict()
_instances_lock = threading.Lock()
@property
def _cxx_agent(self):
cxx_agent = getattr(self, '_Agent__cxx_agent', None)
if cxx_agent is None:
message = "the adaptor agent is not initialized"
raise RuntimeError(message)
return cxx_agent
def _finalize(self):
# The C++ and Python agent objects reference each other.
# The reference cycle is broken in the finalize methods.
#
# This complicated mechanism is needed to avoid deriving
# the ``mindalpha.Agent`` class from ``_mindalpha.PSDefaultAgent``,
# which makes it possible to define custom agent classes
# in Jupyter notebooks.
self.__cxx_agent = None
def run(self):
pass
def handle_request(self, req):
res = Message()
self.send_response(req, res)
@property
def is_coordinator(self):
return self._cxx_agent.is_coordinator
@property
def is_server(self):
return self._cxx_agent.is_server
@property
def is_worker(self):
return self._cxx_agent.is_worker
@property
def server_count(self):
return self._cxx_agent.server_count
@property
def worker_count(self):
return self._cxx_agent.worker_count
@property
def rank(self):
return self._cxx_agent.rank
def barrier(self, group=None):
if group is None:
self._cxx_agent.barrier()
else:
self._cxx_agent.barrier(group)
def shutdown(self):
self._cxx_agent.shutdown()
def send_request(self, req, cb):
self._cxx_agent.send_request(req, cb)
def send_all_requests(self, reqs, cb):
self._cxx_agent.send_all_requests(reqs, cb)
def broadcast_request(self, req, cb):
self._cxx_agent.broadcast_request(req, cb)
def send_response(self, req, res):
self._cxx_agent.send_response(req, res)
def __str__(self):
return self._cxx_agent.__str__()
@property
def spark_session(self):
if not self.is_coordinator:
message = "spark session is only available on coordinator"
raise RuntimeError(message)
session = getattr(self, '_Agent__spark_session', None)
if session is None:
message = "spark session is not initialized"
raise RuntimeError(message)
return session
@property
def spark_context(self):
if not self.is_coordinator:
message = "spark context is only available on coordinator"
raise RuntimeError(message)
session = getattr(self, '_Agent__spark_session', None)
if session is None:
message = "spark session is not initialized"
raise RuntimeError(message)
context = session.sparkContext
return context
@classmethod
def _register_instance(cls, ident, instance):
with cls._instances_lock:
if ident in cls._instances:
message = "more than one ps agents are registered for thread 0x%x" % ident
raise RuntimeError(message)
cls._instances[ident] = instance
print('\033[38;5;046mps agent registered for process %d thread 0x%x\033[m' % (os.getpid(), ident))
@classmethod
def _deregister_instance(cls, ident):
with cls._instances_lock:
try:
del cls._instances[ident]
print('\033[38;5;196mps agent deregistered for process %d thread 0x%x\033[m' % (os.getpid(), ident))
except KeyError:
message = "during deregister instance, no ps agent registered for thread 0x%x on pid %d" % (threading.current_thread().ident, os.getpid())
raise RuntimeError(message)
@classmethod
def get_instance(cls, ident=None):
if ident is None:
ident = threading.current_thread().ident
with cls._instances_lock:
instance = cls._instances.get(ident)
if instance is None:
message = "no ps agent registered for thread 0x%x on pid %d" % (ident, os.getpid())
raise RuntimeError(message)
return instance
@classmethod
def _get_actor_config(cls, args):
conf = ActorConfig()
conf.root_uri = args['root_uri']
conf.root_port = args['root_port']
conf.node_role = NodeRole.__members__[args['node_role']]
conf.agent_creator = args.get('agent_creator', cls)
agent_ready_callback = args.get('agent_ready_callback')
if agent_ready_callback is not None:
conf.agent_ready_callback = agent_ready_callback
conf.server_count = args['server_count']
conf.worker_count = args['worker_count']
conf.is_message_dumping_enabled = args.get('is_message_dumping_enabled', False)
return conf
@classmethod
def _get_reserved_attributes(cls):
reserved = frozenset(dir(cls))
return reserved
@classmethod
def _load_agent_attributes(cls, inst, args):
attrs = args['agent_attributes']
reserved = cls._get_reserved_attributes()
for name, value in attrs.items():
if name in reserved:
message = "agent attribute %r is reserved, " % name
message += "specifying it in config file and "
message += "overriding it with --conf are forbidden"
raise RuntimeError(message)
setattr(inst, name, value)
@classmethod
def _create_agent(cls):
cxx_agent = PSDefaultAgent()
py_agent = cls()
py_agent.__cxx_agent = cxx_agent
cxx_agent.py_agent = py_agent
return cxx_agent
@classmethod
def _run_server(cls, args, _):
# Server processes block the spark method call so that later computational
# spark method calls won't be performed on those server processes.
def create_server():
inst = cls._create_agent()
cls._load_agent_attributes(inst.py_agent, args)
return inst
def server_ready(agent):
print('PS Server node \033[38;5;196m%s\033[m is ready.' % agent)
args = args.copy()
args.update(agent_creator=create_server)
args.update(agent_ready_callback=server_ready)
args.update(node_role='Server')
conf = cls._get_actor_config(args)
PSRunner.run_ps(conf)
return _
@classmethod
def _run_worker(cls, args, _):
ident = threading.current_thread().ident
ready = concurrent.futures.Future()
def create_worker():
inst = cls._create_agent()
cls._load_agent_attributes(inst.py_agent, args)
cls._register_instance(ident, inst.py_agent)
return inst
def worker_ready(agent):
print('PS Worker node \033[38;5;051m%s\033[m is ready.' % agent)
ready.set_result(None)
args = args.copy()
args.update(agent_creator=create_worker)
args.update(agent_ready_callback=worker_ready)
args.update(node_role='Worker')
def run_worker():
try:
conf = cls._get_actor_config(args)
PSRunner.run_ps(conf)
except Exception:
traceback.print_exc()
raise SystemExit(1)
finally:
cls._deregister_instance(ident)
# Worker processes must run in background mode so that the spark method call
# can return immediately and later computational spark method calls can be
# performed on those worker processes.
thread = threading.Thread(target=run_worker, name='run_worker', daemon=True)
thread.start()
# Wait until the agent is ready, which means all the PS nodes are connected to
# each other.
ready.result()
return _
@classmethod
def _launch_coordinator(cls, args, spark_session, launcher):
def create_coordinator():
inst = cls._create_agent()
inst.py_agent.__spark_session = spark_session
cls._load_agent_attributes(inst.py_agent, args)
launcher._initialize_agent(inst.py_agent)
return inst
def coordinator_ready(agent):
print('PS Coordinator node \033[32m%s\033[m is ready.' % agent)
args = args.copy()
args.update(agent_creator=create_coordinator)
args.update(agent_ready_callback=coordinator_ready)
args.update(node_role='Coordinator')
loop = asyncio.get_running_loop()
future = loop.create_future()
def launch_coordinator():
try:
conf = cls._get_actor_config(args)
PSRunner.run_ps(conf)
loop.call_soon_threadsafe(future.set_result, None)
except Exception as e:
loop.call_soon_threadsafe(future.set_exception, e)
thread = threading.Thread(target=launch_coordinator, name='launch_coordinator', daemon=True)
thread.start()
return future
@classmethod
def _launch_servers(cls, args, spark_session):
loop = asyncio.get_running_loop()
future = loop.create_future()
def launch_servers():
try:
server_count = args['server_count']
spark_context = spark_session.sparkContext
rdd = spark_context.parallelize(range(server_count), server_count)
rdd.barrier().mapPartitions(lambda _: cls._run_server(args, _)).collect()
loop.call_soon_threadsafe(future.set_result, None)
except Exception as e:
loop.call_soon_threadsafe(future.set_exception, e)
thread = threading.Thread(target=launch_servers, name='launch_servers', daemon=True)
thread.start()
return future
@classmethod
def _launch_workers(cls, args, spark_session):
loop = asyncio.get_running_loop()
future = loop.create_future()
def launch_workers():
try:
worker_count = args['worker_count']
spark_context = spark_session.sparkContext
rdd = spark_context.parallelize(range(worker_count), worker_count)
rdd.barrier().mapPartitions(lambda _: cls._run_worker(args, _)).collect()
loop.call_soon_threadsafe(future.set_result, None)
except Exception as e:
loop.call_soon_threadsafe(future.set_exception, e)
thread = threading.Thread(target=launch_workers, name='launch_workers', daemon=True)
thread.start()
return future
@classmethod
async def _launch(cls, args, spark_session, launcher):
ip, port = get_available_endpoint()
args = args.copy()
args.update(root_uri=ip)
args.update(root_port=port)
futures = []
futures.append(cls._launch_servers(args, spark_session))
futures.append(cls._launch_workers(args, spark_session))
futures.append(cls._launch_coordinator(args, spark_session, launcher))
await asyncio.gather(*futures)
def worker_start(self):
pass
def worker_stop(self):
pass
@classmethod
def _worker_start(cls, _):
self = __class__.get_instance()
self.worker_start()
return _
@classmethod
def _worker_stop(cls, _):
self = __class__.get_instance()
self.worker_stop()
return _
def start_workers(self):
rdd = self.spark_context.parallelize(range(self.worker_count), self.worker_count)
rdd.barrier().mapPartitions(self._worker_start).collect()
def stop_workers(self):
rdd = self.spark_context.parallelize(range(self.worker_count), self.worker_count)
rdd.barrier().mapPartitions(self._worker_stop).collect()
def load_dataset(self, dataset_path):
from pyspark.sql import functions as F
dataset_path = use_s3a(dataset_path)
df = (self.spark_session.read
.format('csv')
.option('header', 'false')
.option('nullable', 'false')
.option('delimiter', '\002')
.option('encoding', 'UTF-8')
.load(dataset_path))
return df.select(F.array(df.columns))
def feed_training_dataset(self, dataset_path, nepoches=1):
for epoch in range(nepoches):
df = self.load_dataset(dataset_path)
df = df.select(self.feed_training_minibatch()(*df.columns).alias('train'))
df.write.format('noop').mode('overwrite').save()
def feed_validation_dataset(self, dataset_path, nepoches=1):
for epoch in range(nepoches):
df = self.load_dataset(dataset_path)
df = df.select(self.feed_validation_minibatch()(*df.columns).alias('validate'))
df.write.format('noop').mode('overwrite').save()
def feed_training_minibatch(self):
from pyspark.sql.types import FloatType
from pyspark.sql.functions import pandas_udf
@pandas_udf(returnType=FloatType())
def _feed_training_minibatch(minibatch):
self = __class__.get_instance()
result = self.train_minibatch(minibatch)
result = self.process_minibatch_result(minibatch, result)
return result
return _feed_training_minibatch
def feed_validation_minibatch(self):
from pyspark.sql.types import FloatType
from pyspark.sql.functions import pandas_udf
@pandas_udf(returnType=FloatType())
def _feed_validation_minibatch(minibatch):
self = __class__.get_instance()
result = self.validate_minibatch(minibatch)
result = self.process_minibatch_result(minibatch, result)
return result
return _feed_validation_minibatch
def preprocess_minibatch(self, minibatch):
import numpy as np
import pandas as pd
columns = minibatch.apply(pd.Series)
ndarrays = list(columns.values.T)
labels = columns[1].values.astype(np.float32)
return ndarrays, labels
def process_minibatch_result(self, minibatch, result):
import pandas as pd
if result is None:
result = pd.Series([0.0] * len(minibatch))
if len(result) != len(minibatch):
message = "result length (%d) and " % len(result)
message += "minibatch size (%d) mismatch" % len(minibatch)
raise RuntimeError(message)
if not isinstance(result, pd.Series):
if len(result.reshape(-1)) == len(minibatch):
result = result.reshape(-1)
else:
message = "result can not be converted to pandas series; "
message += "result.shape: {}, ".format(result.shape)
message += "minibatch_size: {}".format(len(minibatch))
raise RuntimeError(message)
result = pd.Series(result)
return result
def train_minibatch(self, minibatch):
message = "Agent.train_minibatch method is not implemented"
raise NotImplementedError(message)
def validate_minibatch(self, minibatch):
message = "Agent.validate_minibatch method is not implemented"
raise NotImplementedError(message)
def _create_metric(self):
metric = ModelMetric()
return metric
@property
def _metric(self):
metric = getattr(self, '_Agent__metric', None)
if metric is None:
metric = self._create_metric()
self.__metric = metric
return metric
def update_metric(self, predictions, labels):
self._metric.accumulate(predictions.data.numpy(), labels.data.numpy())
def push_metric(self):
body = dict(command='PushMetric')
req = Message()
req.body = json.dumps(body)
req.receiver = 0 << 4 | 8 | 1
states = self._metric.get_states()
for state in states:
req.add_slice(state)
def push_metric_callback(req, res):
self.clear_metric()
self.send_request(req, push_metric_callback)
def clear_metric(self):
self._metric.clear()
def handle_request(self, req):
body = json.loads(req.body)
command = body.get('command')
if command == 'PushMetric':
states = ()
for i in range(req.slice_count):
states += req.get_slice(i),
accum = self._metric
delta = ModelMetric.from_states(states)
accum.merge(delta)
string = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
string += f' -- auc: {accum.compute_auc()}'
string += f', \u0394auc: {delta.compute_auc()}'
string += f', pcoc: {accum.compute_pcoc()}'
string += f', \u0394pcoc: {delta.compute_pcoc()}'
string += f', #instance: {accum.instance_count}'
if accum.threshold > 0.0:
string += f', accuracy: {accum.compute_accuracy()}'
string += f', precision: {accum.compute_precision()}'
string += f', recall: {accum.compute_recall()}'
string += f', F{accum.beta:g}_score: {accum.compute_f_score()}'
print(string)
res = Message()
self.send_response(req, res)
return
super().handle_request(req)
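# --- Example (not part of the original file): a minimal sketch of a custom agent,
# assuming a PyTorch model and an updater object have been attached as agent
# attributes before launch. MyAgent, self.model and self.updater are hypothetical
# names; only preprocess_minibatch, update_metric and the train/validate hooks
# come from the Agent API above.
import torch

class MyAgent(Agent):
    def train_minibatch(self, minibatch):
        # split the raw CSV columns into feature ndarrays and float32 labels
        ndarrays, labels = self.preprocess_minibatch(minibatch)
        labels = torch.from_numpy(labels).reshape(-1, 1)
        predictions = self.model(ndarrays)          # hypothetical forward pass
        self.updater.step(predictions, labels)      # hypothetical backward/optimizer step
        self.update_metric(predictions, labels)
        return predictions.detach().reshape(-1).numpy()

    def validate_minibatch(self, minibatch):
        ndarrays, labels = self.preprocess_minibatch(minibatch)
        labels = torch.from_numpy(labels).reshape(-1, 1)
        with torch.no_grad():
            predictions = self.model(ndarrays)      # hypothetical forward pass
        self.update_metric(predictions, labels)
        return predictions.reshape(-1).numpy()

# On the coordinator, a run would then look roughly like:
#   agent.start_workers()
#   agent.feed_training_dataset('s3://bucket/train/')       # hypothetical path
#   agent.feed_validation_dataset('s3://bucket/validate/')  # hypothetical path
#   agent.stop_workers()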
test_connection.py
#!/usr/bin/env python
# test_connection.py - unit test for connection attributes
#
# Copyright (C) 2008-2011 James Henstridge <james@jamesh.id.au>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import os
import sys
import time
import threading
from operator import attrgetter
import psycopg2
import psycopg2.errorcodes
from psycopg2 import extensions as ext
from testutils import (
unittest, decorate_all_tests, skip_if_no_superuser,
skip_before_postgres, skip_after_postgres, skip_before_libpq,
ConnectingTestCase, skip_if_tpc_disabled, skip_if_windows, slow)
from testconfig import dsn, dbname
class ConnectionTests(ConnectingTestCase):
def test_closed_attribute(self):
conn = self.conn
self.assertEqual(conn.closed, False)
conn.close()
self.assertEqual(conn.closed, True)
def test_close_idempotent(self):
conn = self.conn
conn.close()
conn.close()
self.assert_(conn.closed)
def test_cursor_closed_attribute(self):
conn = self.conn
curs = conn.cursor()
self.assertEqual(curs.closed, False)
curs.close()
self.assertEqual(curs.closed, True)
# Closing the connection closes the cursor:
curs = conn.cursor()
conn.close()
self.assertEqual(curs.closed, True)
@skip_before_postgres(8, 4)
@skip_if_no_superuser
@skip_if_windows
def test_cleanup_on_badconn_close(self):
# ticket #148
conn = self.conn
cur = conn.cursor()
try:
cur.execute("select pg_terminate_backend(pg_backend_pid())")
except psycopg2.OperationalError as e:
if e.pgcode != psycopg2.errorcodes.ADMIN_SHUTDOWN:
raise
except psycopg2.DatabaseError as e:
# curiously when disconnected in green mode we get a DatabaseError
# without pgcode.
if e.pgcode is not None:
raise
self.assertEqual(conn.closed, 2)
conn.close()
self.assertEqual(conn.closed, 1)
def test_reset(self):
conn = self.conn
# switch session characteristics
conn.autocommit = True
conn.isolation_level = 'serializable'
conn.readonly = True
if self.conn.server_version >= 90100:
conn.deferrable = False
self.assert_(conn.autocommit)
self.assertEqual(conn.isolation_level, ext.ISOLATION_LEVEL_SERIALIZABLE)
self.assert_(conn.readonly is True)
if self.conn.server_version >= 90100:
self.assert_(conn.deferrable is False)
conn.reset()
# now the session characteristics should be reverted
self.assert_(not conn.autocommit)
self.assertEqual(conn.isolation_level, ext.ISOLATION_LEVEL_DEFAULT)
self.assert_(conn.readonly is None)
if self.conn.server_version >= 90100:
self.assert_(conn.deferrable is None)
def test_notices(self):
conn = self.conn
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
cur.execute("create temp table chatty (id serial primary key);")
self.assertEqual("CREATE TABLE", cur.statusmessage)
self.assert_(conn.notices)
def test_notices_consistent_order(self):
conn = self.conn
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
cur.execute("""
create temp table table1 (id serial);
create temp table table2 (id serial);
""")
cur.execute("""
create temp table table3 (id serial);
create temp table table4 (id serial);
""")
self.assertEqual(4, len(conn.notices))
self.assert_('table1' in conn.notices[0])
self.assert_('table2' in conn.notices[1])
self.assert_('table3' in conn.notices[2])
self.assert_('table4' in conn.notices[3])
@slow
def test_notices_limited(self):
conn = self.conn
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
for i in range(0, 100, 10):
sql = " ".join(["create temp table table%d (id serial);" % j
for j in range(i, i + 10)])
cur.execute(sql)
self.assertEqual(50, len(conn.notices))
self.assert_('table99' in conn.notices[-1], conn.notices[-1])
@slow
def test_notices_deque(self):
from collections import deque
conn = self.conn
self.conn.notices = deque()
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
cur.execute("""
create temp table table1 (id serial);
create temp table table2 (id serial);
""")
cur.execute("""
create temp table table3 (id serial);
create temp table table4 (id serial);""")
self.assertEqual(len(conn.notices), 4)
self.assert_('table1' in conn.notices.popleft())
self.assert_('table2' in conn.notices.popleft())
self.assert_('table3' in conn.notices.popleft())
self.assert_('table4' in conn.notices.popleft())
self.assertEqual(len(conn.notices), 0)
# not limited, but no error
for i in range(0, 100, 10):
sql = " ".join(["create temp table table2_%d (id serial);" % j
for j in range(i, i + 10)])
cur.execute(sql)
self.assertEqual(len([n for n in conn.notices if 'CREATE TABLE' in n]),
100)
def test_notices_noappend(self):
conn = self.conn
self.conn.notices = None # appending notices will now fail; the error should be swallowed
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
cur.execute("create temp table table1 (id serial);")
self.assertEqual(self.conn.notices, None)
def test_server_version(self):
self.assert_(self.conn.server_version)
def test_protocol_version(self):
self.assert_(self.conn.protocol_version in (2, 3),
self.conn.protocol_version)
def test_tpc_unsupported(self):
cnn = self.conn
if cnn.server_version >= 80100:
return self.skipTest("tpc is supported")
self.assertRaises(psycopg2.NotSupportedError,
cnn.xid, 42, "foo", "bar")
@slow
@skip_before_postgres(8, 2)
def test_concurrent_execution(self):
def slave():
cnn = self.connect()
cur = cnn.cursor()
cur.execute("select pg_sleep(4)")
cur.close()
cnn.close()
t1 = threading.Thread(target=slave)
t2 = threading.Thread(target=slave)
t0 = time.time()
t1.start()
t2.start()
t1.join()
t2.join()
self.assert_(time.time() - t0 < 7,
"something broken in concurrency")
def test_encoding_name(self):
self.conn.set_client_encoding("EUC_JP")
# conn.encoding is 'EUCJP' now.
cur = self.conn.cursor()
ext.register_type(ext.UNICODE, cur)
cur.execute("select 'foo'::text;")
self.assertEqual(cur.fetchone()[0], u'foo')
def test_connect_nonnormal_envvar(self):
# We must perform encoding normalization at connection time
self.conn.close()
oldenc = os.environ.get('PGCLIENTENCODING')
os.environ['PGCLIENTENCODING'] = 'utf-8' # malformed spelling
try:
self.conn = self.connect()
finally:
if oldenc is not None:
os.environ['PGCLIENTENCODING'] = oldenc
else:
del os.environ['PGCLIENTENCODING']
def test_weakref(self):
from weakref import ref
import gc
conn = psycopg2.connect(dsn)
w = ref(conn)
conn.close()
del conn
gc.collect()
self.assert_(w() is None)
@slow
def test_commit_concurrency(self):
# The problem is the one reported in ticket #103. Because of bad
# status check, we commit even when a commit is already on its way.
# We can detect this condition by the warnings.
conn = self.conn
notices = []
stop = []
def committer():
while not stop:
conn.commit()
while conn.notices:
notices.append((2, conn.notices.pop()))
cur = conn.cursor()
t1 = threading.Thread(target=committer)
t1.start()
i = 1
for i in range(1000):
cur.execute("select %s;", (i,))
conn.commit()
while conn.notices:
notices.append((1, conn.notices.pop()))
# Stop the committer thread
stop.append(True)
self.assert_(not notices, "%d notices raised" % len(notices))
def test_connect_cursor_factory(self):
import psycopg2.extras
conn = self.connect(cursor_factory=psycopg2.extras.DictCursor)
cur = conn.cursor()
cur.execute("select 1 as a")
self.assertEqual(cur.fetchone()['a'], 1)
def test_cursor_factory(self):
self.assertEqual(self.conn.cursor_factory, None)
cur = self.conn.cursor()
cur.execute("select 1 as a")
self.assertRaises(TypeError, (lambda r: r['a']), cur.fetchone())
self.conn.cursor_factory = psycopg2.extras.DictCursor
self.assertEqual(self.conn.cursor_factory, psycopg2.extras.DictCursor)
cur = self.conn.cursor()
cur.execute("select 1 as a")
self.assertEqual(cur.fetchone()['a'], 1)
self.conn.cursor_factory = None
self.assertEqual(self.conn.cursor_factory, None)
cur = self.conn.cursor()
cur.execute("select 1 as a")
self.assertRaises(TypeError, (lambda r: r['a']), cur.fetchone())
def test_cursor_factory_none(self):
# issue #210
conn = self.connect()
cur = conn.cursor(cursor_factory=None)
self.assertEqual(type(cur), ext.cursor)
conn = self.connect(cursor_factory=psycopg2.extras.DictCursor)
cur = conn.cursor(cursor_factory=None)
self.assertEqual(type(cur), psycopg2.extras.DictCursor)
def test_failed_init_status(self):
class SubConnection(ext.connection):
def __init__(self, dsn):
try:
super(SubConnection, self).__init__(dsn)
except Exception:
pass
c = SubConnection("dbname=thereisnosuchdatabasemate password=foobar")
self.assert_(c.closed, "connection failed so it must be closed")
self.assert_('foobar' not in c.dsn, "password was not obscured")
class ParseDsnTestCase(ConnectingTestCase):
def test_parse_dsn(self):
from psycopg2 import ProgrammingError
self.assertEqual(
ext.parse_dsn('dbname=test user=tester password=secret'),
dict(user='tester', password='secret', dbname='test'),
"simple DSN parsed")
self.assertRaises(ProgrammingError, ext.parse_dsn,
"dbname=test 2 user=tester password=secret")
self.assertEqual(
ext.parse_dsn("dbname='test 2' user=tester password=secret"),
dict(user='tester', password='secret', dbname='test 2'),
"DSN with quoting parsed")
# Can't really use assertRaisesRegexp() here since we need to
# make sure that secret is *not* exposed in the error message
# (and it also requires python >= 2.7).
raised = False
try:
# unterminated quote after dbname:
ext.parse_dsn("dbname='test 2 user=tester password=secret")
except ProgrammingError as e:
raised = True
self.assertTrue(str(e).find('secret') < 0,
"DSN was not exposed in error message")
except Exception as e:
self.fail("unexpected error condition: " + repr(e))
self.assertTrue(raised, "ProgrammingError raised due to invalid DSN")
@skip_before_libpq(9, 2)
def test_parse_dsn_uri(self):
self.assertEqual(ext.parse_dsn('postgresql://tester:secret@/test'),
dict(user='tester', password='secret', dbname='test'),
"valid URI dsn parsed")
raised = False
try:
# extra '=' after port value
ext.parse_dsn(dsn='postgresql://tester:secret@/test?port=1111=x')
except psycopg2.ProgrammingError as e:
raised = True
self.assertTrue(str(e).find('secret') < 0,
"URI was not exposed in error message")
except Exception as e:
self.fail("unexpected error condition: " + repr(e))
self.assertTrue(raised, "ProgrammingError raised due to invalid URI")
def test_unicode_value(self):
snowman = u"\u2603"
d = ext.parse_dsn('dbname=' + snowman)
if sys.version_info[0] < 3:
self.assertEqual(d['dbname'], snowman.encode('utf8'))
else:
self.assertEqual(d['dbname'], snowman)
def test_unicode_key(self):
snowman = u"\u2603"
self.assertRaises(psycopg2.ProgrammingError, ext.parse_dsn,
snowman + '=' + snowman)
def test_bad_param(self):
self.assertRaises(TypeError, ext.parse_dsn, None)
self.assertRaises(TypeError, ext.parse_dsn, 42)
class MakeDsnTestCase(ConnectingTestCase):
def test_empty_arguments(self):
self.assertEqual(ext.make_dsn(), '')
def test_empty_string(self):
dsn = ext.make_dsn('')
self.assertEqual(dsn, '')
def test_params_validation(self):
self.assertRaises(psycopg2.ProgrammingError,
ext.make_dsn, 'dbnamo=a')
self.assertRaises(psycopg2.ProgrammingError,
ext.make_dsn, dbnamo='a')
self.assertRaises(psycopg2.ProgrammingError,
ext.make_dsn, 'dbname=a', nosuchparam='b')
def test_empty_param(self):
dsn = ext.make_dsn(dbname='sony', password='')
self.assertDsnEqual(dsn, "dbname=sony password=''")
def test_escape(self):
dsn = ext.make_dsn(dbname='hello world')
self.assertEqual(dsn, "dbname='hello world'")
dsn = ext.make_dsn(dbname=r'back\slash')
self.assertEqual(dsn, r"dbname=back\\slash")
dsn = ext.make_dsn(dbname="quo'te")
self.assertEqual(dsn, r"dbname=quo\'te")
dsn = ext.make_dsn(dbname="with\ttab")
self.assertEqual(dsn, "dbname='with\ttab'")
dsn = ext.make_dsn(dbname=r"\every thing'")
self.assertEqual(dsn, r"dbname='\\every thing\''")
def test_database_is_a_keyword(self):
self.assertEqual(ext.make_dsn(database='sigh'), "dbname=sigh")
def test_params_merging(self):
dsn = ext.make_dsn('dbname=foo host=bar', host='baz')
self.assertDsnEqual(dsn, 'dbname=foo host=baz')
dsn = ext.make_dsn('dbname=foo', user='postgres')
self.assertDsnEqual(dsn, 'dbname=foo user=postgres')
def test_no_dsn_munging(self):
dsnin = 'dbname=a host=b user=c password=d'
dsn = ext.make_dsn(dsnin)
self.assertEqual(dsn, dsnin)
def test_null_args(self):
dsn = ext.make_dsn("dbname=foo", user="bar", password=None)
self.assertDsnEqual(dsn, "dbname=foo user=bar")
@skip_before_libpq(9, 2)
def test_url_is_cool(self):
url = 'postgresql://tester:secret@/test?application_name=wat'
dsn = ext.make_dsn(url)
self.assertEqual(dsn, url)
dsn = ext.make_dsn(url, application_name='woot')
self.assertDsnEqual(dsn,
'dbname=test user=tester password=secret application_name=woot')
self.assertRaises(psycopg2.ProgrammingError,
ext.make_dsn, 'postgresql://tester:secret@/test?nosuch=param')
self.assertRaises(psycopg2.ProgrammingError,
ext.make_dsn, url, nosuch="param")
@skip_before_libpq(9, 3)
def test_get_dsn_parameters(self):
conn = self.connect()
d = conn.get_dsn_parameters()
self.assertEqual(d['dbname'], dbname) # the only param we can check reliably
self.assert_('password' not in d, d)
class IsolationLevelsTestCase(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
conn = self.connect()
cur = conn.cursor()
try:
cur.execute("drop table isolevel;")
except psycopg2.ProgrammingError:
conn.rollback()
cur.execute("create table isolevel (id integer);")
conn.commit()
conn.close()
def test_isolation_level(self):
conn = self.connect()
self.assertEqual(
conn.isolation_level,
ext.ISOLATION_LEVEL_DEFAULT)
def test_encoding(self):
conn = self.connect()
self.assert_(conn.encoding in ext.encodings)
def test_set_isolation_level(self):
conn = self.connect()
curs = conn.cursor()
levels = [
('read uncommitted',
ext.ISOLATION_LEVEL_READ_UNCOMMITTED),
('read committed', ext.ISOLATION_LEVEL_READ_COMMITTED),
('repeatable read', ext.ISOLATION_LEVEL_REPEATABLE_READ),
('serializable', ext.ISOLATION_LEVEL_SERIALIZABLE),
]
for name, level in levels:
conn.set_isolation_level(level)
# the only values available on prehistoric PG versions
if conn.server_version < 80000:
if level in (
ext.ISOLATION_LEVEL_READ_UNCOMMITTED,
ext.ISOLATION_LEVEL_REPEATABLE_READ):
name, level = levels[levels.index((name, level)) + 1]
self.assertEqual(conn.isolation_level, level)
curs.execute('show transaction_isolation;')
got_name = curs.fetchone()[0]
self.assertEqual(name, got_name)
conn.commit()
self.assertRaises(ValueError, conn.set_isolation_level, -1)
self.assertRaises(ValueError, conn.set_isolation_level, 5)
def test_set_isolation_level_autocommit(self):
conn = self.connect()
curs = conn.cursor()
conn.set_isolation_level(ext.ISOLATION_LEVEL_AUTOCOMMIT)
self.assertEqual(conn.isolation_level, ext.ISOLATION_LEVEL_DEFAULT)
self.assert_(conn.autocommit)
conn.isolation_level = 'serializable'
self.assertEqual(conn.isolation_level, ext.ISOLATION_LEVEL_SERIALIZABLE)
self.assert_(conn.autocommit)
curs.execute('show transaction_isolation;')
self.assertEqual(curs.fetchone()[0], 'serializable')
def test_set_isolation_level_default(self):
conn = self.connect()
curs = conn.cursor()
conn.autocommit = True
curs.execute("set default_transaction_isolation to 'read committed'")
conn.autocommit = False
conn.set_isolation_level(ext.ISOLATION_LEVEL_SERIALIZABLE)
self.assertEqual(conn.isolation_level,
ext.ISOLATION_LEVEL_SERIALIZABLE)
curs.execute("show transaction_isolation")
self.assertEqual(curs.fetchone()[0], "serializable")
conn.rollback()
conn.set_isolation_level(ext.ISOLATION_LEVEL_DEFAULT)
curs.execute("show transaction_isolation")
self.assertEqual(curs.fetchone()[0], "read committed")
def test_set_isolation_level_abort(self):
conn = self.connect()
cur = conn.cursor()
self.assertEqual(ext.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
cur.execute("insert into isolevel values (10);")
self.assertEqual(ext.TRANSACTION_STATUS_INTRANS,
conn.get_transaction_status())
conn.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
cur.execute("select count(*) from isolevel;")
self.assertEqual(0, cur.fetchone()[0])
cur.execute("insert into isolevel values (10);")
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_INTRANS,
conn.get_transaction_status())
conn.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
cur.execute("select count(*) from isolevel;")
self.assertEqual(0, cur.fetchone()[0])
cur.execute("insert into isolevel values (10);")
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
conn.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
cur.execute("select count(*) from isolevel;")
self.assertEqual(1, cur.fetchone()[0])
self.assertEqual(conn.isolation_level,
psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
def test_isolation_level_autocommit(self):
cnn1 = self.connect()
cnn2 = self.connect()
cnn2.set_isolation_level(ext.ISOLATION_LEVEL_AUTOCOMMIT)
cur1 = cnn1.cursor()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(0, cur1.fetchone()[0])
cnn1.commit()
cur2 = cnn2.cursor()
cur2.execute("insert into isolevel values (10);")
cur1.execute("select count(*) from isolevel;")
self.assertEqual(1, cur1.fetchone()[0])
def test_isolation_level_read_committed(self):
cnn1 = self.connect()
cnn2 = self.connect()
cnn2.set_isolation_level(ext.ISOLATION_LEVEL_READ_COMMITTED)
cur1 = cnn1.cursor()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(0, cur1.fetchone()[0])
cnn1.commit()
cur2 = cnn2.cursor()
cur2.execute("insert into isolevel values (10);")
cur1.execute("insert into isolevel values (20);")
cur2.execute("select count(*) from isolevel;")
self.assertEqual(1, cur2.fetchone()[0])
cnn1.commit()
cur2.execute("select count(*) from isolevel;")
self.assertEqual(2, cur2.fetchone()[0])
cur1.execute("select count(*) from isolevel;")
self.assertEqual(1, cur1.fetchone()[0])
cnn2.commit()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(2, cur1.fetchone()[0])
def test_isolation_level_serializable(self):
cnn1 = self.connect()
cnn2 = self.connect()
cnn2.set_isolation_level(ext.ISOLATION_LEVEL_SERIALIZABLE)
cur1 = cnn1.cursor()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(0, cur1.fetchone()[0])
cnn1.commit()
cur2 = cnn2.cursor()
cur2.execute("insert into isolevel values (10);")
cur1.execute("insert into isolevel values (20);")
cur2.execute("select count(*) from isolevel;")
self.assertEqual(1, cur2.fetchone()[0])
cnn1.commit()
cur2.execute("select count(*) from isolevel;")
self.assertEqual(1, cur2.fetchone()[0])
cur1.execute("select count(*) from isolevel;")
self.assertEqual(1, cur1.fetchone()[0])
cnn2.commit()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(2, cur1.fetchone()[0])
cur2.execute("select count(*) from isolevel;")
self.assertEqual(2, cur2.fetchone()[0])
def test_isolation_level_closed(self):
cnn = self.connect()
cnn.close()
self.assertRaises(psycopg2.InterfaceError,
cnn.set_isolation_level, 0)
self.assertRaises(psycopg2.InterfaceError,
cnn.set_isolation_level, 1)
def test_setattr_isolation_level_int(self):
cur = self.conn.cursor()
self.conn.isolation_level = ext.ISOLATION_LEVEL_SERIALIZABLE
self.assertEqual(self.conn.isolation_level, ext.ISOLATION_LEVEL_SERIALIZABLE)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.isolation_level = ext.ISOLATION_LEVEL_REPEATABLE_READ
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_REPEATABLE_READ)
self.assertEqual(cur.fetchone()[0], 'repeatable read')
else:
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_SERIALIZABLE)
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.isolation_level = ext.ISOLATION_LEVEL_READ_COMMITTED
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_READ_COMMITTED)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
self.conn.isolation_level = ext.ISOLATION_LEVEL_READ_UNCOMMITTED
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_READ_UNCOMMITTED)
self.assertEqual(cur.fetchone()[0], 'read uncommitted')
else:
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_READ_COMMITTED)
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
self.assertEqual(ext.ISOLATION_LEVEL_DEFAULT, None)
self.conn.isolation_level = ext.ISOLATION_LEVEL_DEFAULT
self.assertEqual(self.conn.isolation_level, None)
cur.execute("SHOW transaction_isolation;")
isol = cur.fetchone()[0]
cur.execute("SHOW default_transaction_isolation;")
self.assertEqual(cur.fetchone()[0], isol)
def test_setattr_isolation_level_str(self):
cur = self.conn.cursor()
self.conn.isolation_level = "serializable"
self.assertEqual(self.conn.isolation_level, ext.ISOLATION_LEVEL_SERIALIZABLE)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.isolation_level = "repeatable read"
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_REPEATABLE_READ)
self.assertEqual(cur.fetchone()[0], 'repeatable read')
else:
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_SERIALIZABLE)
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.isolation_level = "read committed"
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_READ_COMMITTED)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
self.conn.isolation_level = "read uncommitted"
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_READ_UNCOMMITTED)
self.assertEqual(cur.fetchone()[0], 'read uncommitted')
else:
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_READ_COMMITTED)
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
self.conn.isolation_level = "default"
self.assertEqual(self.conn.isolation_level, None)
cur.execute("SHOW transaction_isolation;")
isol = cur.fetchone()[0]
cur.execute("SHOW default_transaction_isolation;")
self.assertEqual(cur.fetchone()[0], isol)
def test_setattr_isolation_level_invalid(self):
self.assertRaises(ValueError, setattr, self.conn, 'isolation_level', 0)
self.assertRaises(ValueError, setattr, self.conn, 'isolation_level', -1)
self.assertRaises(ValueError, setattr, self.conn, 'isolation_level', 5)
self.assertRaises(ValueError, setattr, self.conn, 'isolation_level', 'bah')
class ConnectionTwoPhaseTests(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
self.make_test_table()
self.clear_test_xacts()
def tearDown(self):
self.clear_test_xacts()
ConnectingTestCase.tearDown(self)
def clear_test_xacts(self):
"""Rollback all the prepared transaction in the testing db."""
cnn = self.connect()
cnn.set_isolation_level(0)
cur = cnn.cursor()
try:
cur.execute(
"select gid from pg_prepared_xacts where database = %s",
(dbname,))
except psycopg2.ProgrammingError:
cnn.rollback()
cnn.close()
return
gids = [r[0] for r in cur]
for gid in gids:
cur.execute("rollback prepared %s;", (gid,))
cnn.close()
def make_test_table(self):
cnn = self.connect()
cur = cnn.cursor()
try:
cur.execute("DROP TABLE test_tpc;")
except psycopg2.ProgrammingError:
cnn.rollback()
cur.execute("CREATE TABLE test_tpc (data text);")
cnn.commit()
cnn.close()
def count_xacts(self):
"""Return the number of prepared xacts currently in the test db."""
cnn = self.connect()
cur = cnn.cursor()
cur.execute("""
select count(*) from pg_prepared_xacts
where database = %s;""",
(dbname,))
rv = cur.fetchone()[0]
cnn.close()
return rv
def count_test_records(self):
"""Return the number of records in the test table."""
cnn = self.connect()
cur = cnn.cursor()
cur.execute("select count(*) from test_tpc;")
rv = cur.fetchone()[0]
cnn.close()
return rv
def test_tpc_commit(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, ext.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, ext.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
self.assertEqual(cnn.status, ext.STATUS_PREPARED)
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_commit()
self.assertEqual(cnn.status, ext.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(1, self.count_test_records())
def test_tpc_commit_one_phase(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, ext.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, ext.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit_1p');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_commit()
self.assertEqual(cnn.status, ext.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(1, self.count_test_records())
def test_tpc_commit_recovered(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, ext.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, ext.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit_rec');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
cnn.close()
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
cnn.tpc_commit(xid)
self.assertEqual(cnn.status, ext.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(1, self.count_test_records())
def test_tpc_rollback(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, ext.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, ext.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_rollback');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
self.assertEqual(cnn.status, ext.STATUS_PREPARED)
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_rollback()
self.assertEqual(cnn.status, ext.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
def test_tpc_rollback_one_phase(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, ext.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, ext.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_rollback_1p');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_rollback()
self.assertEqual(cnn.status, ext.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
def test_tpc_rollback_recovered(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, ext.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, ext.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit_rec');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
cnn.close()
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
cnn.tpc_rollback(xid)
self.assertEqual(cnn.status, ext.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
def test_status_after_recover(self):
cnn = self.connect()
self.assertEqual(ext.STATUS_READY, cnn.status)
cnn.tpc_recover()
self.assertEqual(ext.STATUS_READY, cnn.status)
cur = cnn.cursor()
cur.execute("select 1")
self.assertEqual(ext.STATUS_BEGIN, cnn.status)
cnn.tpc_recover()
self.assertEqual(ext.STATUS_BEGIN, cnn.status)
def test_recovered_xids(self):
# insert a few test xns
cnn = self.connect()
cnn.set_isolation_level(0)
cur = cnn.cursor()
cur.execute("begin; prepare transaction '1-foo';")
cur.execute("begin; prepare transaction '2-bar';")
# read the values to return
cur.execute("""
select gid, prepared, owner, database
from pg_prepared_xacts
where database = %s;""",
(dbname,))
okvals = cur.fetchall()
okvals.sort()
cnn = self.connect()
xids = cnn.tpc_recover()
xids = [xid for xid in xids if xid.database == dbname]
xids.sort(key=attrgetter('gtrid'))
# check the values returned
self.assertEqual(len(okvals), len(xids))
for (xid, (gid, prepared, owner, database)) in zip(xids, okvals):
self.assertEqual(xid.gtrid, gid)
self.assertEqual(xid.prepared, prepared)
self.assertEqual(xid.owner, owner)
self.assertEqual(xid.database, database)
def test_xid_encoding(self):
cnn = self.connect()
xid = cnn.xid(42, "gtrid", "bqual")
cnn.tpc_begin(xid)
cnn.tpc_prepare()
cnn = self.connect()
cur = cnn.cursor()
cur.execute("select gid from pg_prepared_xacts where database = %s;",
(dbname,))
self.assertEqual('42_Z3RyaWQ=_YnF1YWw=', cur.fetchone()[0])
@slow
def test_xid_roundtrip(self):
for fid, gtrid, bqual in [
(0, "", ""),
(42, "gtrid", "bqual"),
(0x7fffffff, "x" * 64, "y" * 64),
]:
cnn = self.connect()
xid = cnn.xid(fid, gtrid, bqual)
cnn.tpc_begin(xid)
cnn.tpc_prepare()
cnn.close()
cnn = self.connect()
xids = [x for x in cnn.tpc_recover() if x.database == dbname]
self.assertEqual(1, len(xids))
xid = xids[0]
self.assertEqual(xid.format_id, fid)
self.assertEqual(xid.gtrid, gtrid)
self.assertEqual(xid.bqual, bqual)
cnn.tpc_rollback(xid)
@slow
def test_unparsed_roundtrip(self):
for tid in [
'',
'hello, world!',
'x' * 199, # PostgreSQL's limit in transaction id length
]:
cnn = self.connect()
cnn.tpc_begin(tid)
cnn.tpc_prepare()
cnn.close()
cnn = self.connect()
xids = [x for x in cnn.tpc_recover() if x.database == dbname]
self.assertEqual(1, len(xids))
xid = xids[0]
self.assertEqual(xid.format_id, None)
self.assertEqual(xid.gtrid, tid)
self.assertEqual(xid.bqual, None)
cnn.tpc_rollback(xid)
def test_xid_construction(self):
from psycopg2.extensions import Xid
x1 = Xid(74, 'foo', 'bar')
self.assertEqual(74, x1.format_id)
self.assertEqual('foo', x1.gtrid)
self.assertEqual('bar', x1.bqual)
def test_xid_from_string(self):
from psycopg2.extensions import Xid
x2 = Xid.from_string('42_Z3RyaWQ=_YnF1YWw=')
self.assertEqual(42, x2.format_id)
self.assertEqual('gtrid', x2.gtrid)
self.assertEqual('bqual', x2.bqual)
x3 = Xid.from_string('99_xxx_yyy')
self.assertEqual(None, x3.format_id)
self.assertEqual('99_xxx_yyy', x3.gtrid)
self.assertEqual(None, x3.bqual)
def test_xid_to_string(self):
from psycopg2.extensions import Xid
x1 = Xid.from_string('42_Z3RyaWQ=_YnF1YWw=')
self.assertEqual(str(x1), '42_Z3RyaWQ=_YnF1YWw=')
x2 = Xid.from_string('99_xxx_yyy')
self.assertEqual(str(x2), '99_xxx_yyy')
def test_xid_unicode(self):
cnn = self.connect()
x1 = cnn.xid(10, u'uni', u'code')
cnn.tpc_begin(x1)
cnn.tpc_prepare()
cnn.reset()
xid = [x for x in cnn.tpc_recover() if x.database == dbname][0]
self.assertEqual(10, xid.format_id)
self.assertEqual('uni', xid.gtrid)
self.assertEqual('code', xid.bqual)
def test_xid_unicode_unparsed(self):
# We don't expect people shooting snowmen as transaction ids,
# so if something explodes in an encoding error I don't mind.
# Let's just check that unicode is accepted as a type.
cnn = self.connect()
cnn.set_client_encoding('utf8')
cnn.tpc_begin(u"transaction-id")
cnn.tpc_prepare()
cnn.reset()
xid = [x for x in cnn.tpc_recover() if x.database == dbname][0]
self.assertEqual(None, xid.format_id)
self.assertEqual('transaction-id', xid.gtrid)
self.assertEqual(None, xid.bqual)
def test_cancel_fails_prepared(self):
cnn = self.connect()
cnn.tpc_begin('cancel')
cnn.tpc_prepare()
self.assertRaises(psycopg2.ProgrammingError, cnn.cancel)
def test_tpc_recover_non_dbapi_connection(self):
from psycopg2.extras import RealDictConnection
cnn = self.connect(connection_factory=RealDictConnection)
cnn.tpc_begin('dict-connection')
cnn.tpc_prepare()
cnn.reset()
xids = cnn.tpc_recover()
xid = [x for x in xids if x.database == dbname][0]
self.assertEqual(None, xid.format_id)
self.assertEqual('dict-connection', xid.gtrid)
self.assertEqual(None, xid.bqual)
decorate_all_tests(ConnectionTwoPhaseTests, skip_if_tpc_disabled)
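# --- Example (not part of the original test file): a minimal sketch of the
# two-phase-commit flow exercised by ConnectionTwoPhaseTests above, assuming a
# reachable test database (the `dsn` imported from testconfig) and that the
# test_tpc table already exists.
def tpc_example():
    conn = psycopg2.connect(dsn)
    xid = conn.xid(1, "gtrid", "bqual")        # (format_id, gtrid, bqual)
    conn.tpc_begin(xid)
    cur = conn.cursor()
    cur.execute("insert into test_tpc values ('example');")
    conn.tpc_prepare()                         # transaction is now prepared on the server
    conn.close()
    # another connection (e.g. after a crash) can recover and finish it later
    other = psycopg2.connect(dsn)
    for pending in other.tpc_recover():
        if pending.gtrid == "gtrid":
            other.tpc_commit(pending)          # or other.tpc_rollback(pending)
    other.close()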
class TransactionControlTests(ConnectingTestCase):
def test_closed(self):
self.conn.close()
self.assertRaises(psycopg2.InterfaceError,
self.conn.set_session,
ext.ISOLATION_LEVEL_SERIALIZABLE)
def test_not_in_transaction(self):
cur = self.conn.cursor()
cur.execute("select 1")
self.assertRaises(psycopg2.ProgrammingError,
self.conn.set_session,
ext.ISOLATION_LEVEL_SERIALIZABLE)
def test_set_isolation_level(self):
cur = self.conn.cursor()
self.conn.set_session(
ext.ISOLATION_LEVEL_SERIALIZABLE)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.set_session(
ext.ISOLATION_LEVEL_REPEATABLE_READ)
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], 'repeatable read')
else:
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.set_session(
isolation_level=ext.ISOLATION_LEVEL_READ_COMMITTED)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
self.conn.set_session(
isolation_level=ext.ISOLATION_LEVEL_READ_UNCOMMITTED)
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], 'read uncommitted')
else:
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
def test_set_isolation_level_str(self):
cur = self.conn.cursor()
self.conn.set_session("serializable")
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.set_session("repeatable read")
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], 'repeatable read')
else:
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.set_session("read committed")
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
self.conn.set_session("read uncommitted")
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], 'read uncommitted')
else:
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
def test_bad_isolation_level(self):
self.assertRaises(ValueError, self.conn.set_session, 0)
self.assertRaises(ValueError, self.conn.set_session, 5)
self.assertRaises(ValueError, self.conn.set_session, 'whatever')
def test_set_read_only(self):
self.assert_(self.conn.readonly is None)
cur = self.conn.cursor()
self.conn.set_session(readonly=True)
self.assert_(self.conn.readonly is True)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
self.conn.set_session(readonly=False)
self.assert_(self.conn.readonly is False)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'off')
self.conn.rollback()
def test_setattr_read_only(self):
cur = self.conn.cursor()
self.conn.readonly = True
self.assert_(self.conn.readonly is True)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
self.assertRaises(self.conn.ProgrammingError,
setattr, self.conn, 'readonly', False)
self.assert_(self.conn.readonly is True)
self.conn.rollback()
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
cur = self.conn.cursor()
self.conn.readonly = None
self.assert_(self.conn.readonly is None)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'off') # assume defined by server
self.conn.rollback()
self.conn.readonly = False
self.assert_(self.conn.readonly is False)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'off')
self.conn.rollback()
def test_set_default(self):
cur = self.conn.cursor()
cur.execute("SHOW transaction_isolation;")
isolevel = cur.fetchone()[0]
cur.execute("SHOW transaction_read_only;")
readonly = cur.fetchone()[0]
self.conn.rollback()
self.conn.set_session(isolation_level='serializable', readonly=True)
self.conn.set_session(isolation_level='default', readonly='default')
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], isolevel)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], readonly)
@skip_before_postgres(9, 1)
def test_set_deferrable(self):
self.assert_(self.conn.deferrable is None)
cur = self.conn.cursor()
self.conn.set_session(readonly=True, deferrable=True)
self.assert_(self.conn.deferrable is True)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
self.conn.set_session(deferrable=False)
self.assert_(self.conn.deferrable is False)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'off')
self.conn.rollback()
@skip_after_postgres(9, 1)
def test_set_deferrable_error(self):
self.assertRaises(psycopg2.ProgrammingError,
self.conn.set_session, readonly=True, deferrable=True)
self.assertRaises(psycopg2.ProgrammingError,
setattr, self.conn, 'deferrable', True)
@skip_before_postgres(9, 1)
def test_setattr_deferrable(self):
cur = self.conn.cursor()
self.conn.deferrable = True
self.assert_(self.conn.deferrable is True)
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'on')
self.assertRaises(self.conn.ProgrammingError,
setattr, self.conn, 'deferrable', False)
self.assert_(self.conn.deferrable is True)
self.conn.rollback()
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
cur = self.conn.cursor()
self.conn.deferrable = None
self.assert_(self.conn.deferrable is None)
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'off') # assume defined by server
self.conn.rollback()
self.conn.deferrable = False
self.assert_(self.conn.deferrable is False)
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'off')
self.conn.rollback()
def test_mixing_session_attribs(self):
cur = self.conn.cursor()
self.conn.autocommit = True
self.conn.readonly = True
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.autocommit = False
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'off')
class AutocommitTests(ConnectingTestCase):
def test_closed(self):
self.conn.close()
self.assertRaises(psycopg2.InterfaceError,
setattr, self.conn, 'autocommit', True)
# The getter doesn't have a guard. We may change this in future
# to make it consistent with other methods; meanwhile let's just check
# it doesn't explode.
try:
self.assert_(self.conn.autocommit in (True, False))
except psycopg2.InterfaceError:
pass
def test_default_no_autocommit(self):
self.assert_(not self.conn.autocommit)
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_IDLE)
cur = self.conn.cursor()
cur.execute('select 1;')
self.assertEqual(self.conn.status, ext.STATUS_BEGIN)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_INTRANS)
self.conn.rollback()
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_IDLE)
def test_set_autocommit(self):
self.conn.autocommit = True
self.assert_(self.conn.autocommit)
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_IDLE)
cur = self.conn.cursor()
cur.execute('select 1;')
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_IDLE)
self.conn.autocommit = False
self.assert_(not self.conn.autocommit)
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_IDLE)
cur.execute('select 1;')
self.assertEqual(self.conn.status, ext.STATUS_BEGIN)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_INTRANS)
def test_set_intrans_error(self):
cur = self.conn.cursor()
cur.execute('select 1;')
self.assertRaises(psycopg2.ProgrammingError,
setattr, self.conn, 'autocommit', True)
def test_set_session_autocommit(self):
self.conn.set_session(autocommit=True)
self.assert_(self.conn.autocommit)
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_IDLE)
cur = self.conn.cursor()
cur.execute('select 1;')
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_IDLE)
self.conn.set_session(autocommit=False)
self.assert_(not self.conn.autocommit)
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_IDLE)
cur.execute('select 1;')
self.assertEqual(self.conn.status, ext.STATUS_BEGIN)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_INTRANS)
self.conn.rollback()
self.conn.set_session('serializable', readonly=True, autocommit=True)
self.assert_(self.conn.autocommit)
cur.execute('select 1;')
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_IDLE)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'serializable')
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
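# --- Usage sketch (added for illustration; not part of the test suite) ---
# Typical application-side use of the session API exercised by the tests
# above, assuming `dsn` points at a reachable PostgreSQL database:
#   conn = psycopg2.connect(dsn)
#   conn.set_session(isolation_level='serializable', readonly=True)
#   cur = conn.cursor()
#   cur.execute("SELECT 1")
#   conn.rollback()
#   conn.set_session(isolation_level='default', readonly='default')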
|
sampler.py
|
import threading
import atexit
import time
try:
import Queue as queue
except ImportError:
import queue
from .scoreboard import Scoreboard
import mod_wsgi
class Sampler(object):
sample_interval = 1.0
report_interval = 60.0
def __init__(self):
self.running = False
self.lock = threading.Lock()
self.sampler_queue = queue.Queue()
self.sampler_thread = threading.Thread(target=self.sampler_loop)
self.sampler_thread.setDaemon(True)
self.consumer_queue = queue.Queue()
self.consumer_thread = threading.Thread(target=self.consumer_loop)
self.consumer_thread.setDaemon(True)
self.consumers = []
def register(self, callback):
self.consumers.append(callback)
def consumer_loop(self):
while True:
scoreboard = self.consumer_queue.get()
for consumer in self.consumers:
consumer(scoreboard)
if scoreboard.sampler_exiting:
return
def distribute(self, scoreboard):
self.consumer_queue.put(scoreboard)
def sampler_loop(self):
scoreboard = Scoreboard()
scheduled_time = time.time()
period_end_time = scheduled_time + self.report_interval
while True:
try:
# We want to collect metrics on a regular second
# interval so we need to align the timeout value.
now = time.time()
scheduled_time += self.sample_interval
timeout = max(0, scheduled_time - now)
self.sampler_queue.get(timeout=timeout)
# If we get here we have been notified to exit.
# We update the scoreboard one last time and then
# distribute it to any consumers.
scoreboard.update(rollover=True, exiting=True)
self.distribute(scoreboard)
return
except queue.Empty:
pass
            # Update the scoreboard for the current sampling period.
            # First check whether we will also be rolling it over for the
            # next sampling period, so that any special end-of-period
            # actions can be performed.
now = time.time()
if now >= period_end_time:
scoreboard.update(rollover=True)
                # Distribute the scoreboard to any consumers. They are
                # expected to read but not update it, since the same
                # instance is shared by all of them.
self.distribute(scoreboard)
period_end_time += self.report_interval
# Rollover to a new scoreboard for the next
# sampling period.
scoreboard = scoreboard.rollover()
else:
scoreboard.update(rollover=False)
def terminate(self):
try:
self.sampler_queue.put(None)
except Exception:
pass
self.sampler_thread.join()
self.consumer_thread.join()
def start(self):
if mod_wsgi.server_metrics() is None:
return
with self.lock:
if not self.running:
self.running = True
atexit.register(self.terminate)
self.sampler_thread.start()
self.consumer_thread.start()
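# --- Usage sketch (illustrative; assumes the process runs under mod_wsgi,
# --- otherwise start() is a no-op because server metrics are unavailable) ---
#   sampler = Sampler()
#   sampler.register(lambda scoreboard: print(vars(scoreboard)))  # hypothetical consumer
#   sampler.start()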
|
thread_handler.py
|
# -*- coding: utf-8 -*-
""" Thread handler. """
import threading
import time
from .singleton import Singleton
class ThreadHandler(Singleton):
""" Thread handler. """
def __init__(self):
""" Initialisation. """
self.thread_pool = []
self.run_events = []
def run(self, target, args=()):
""" Run a function in a separate thread.
:param target: the function to run.
:param args: the parameters to pass to the function.
"""
run_event = threading.Event()
run_event.set()
thread = threading.Thread(target=target, args=args + (run_event, ))
thread.daemon = True
self.thread_pool.append(thread)
self.run_events.append(run_event)
thread.start()
def start_run_loop(self):
""" Start the thread handler, ensuring that everything stops property
when sending a keyboard interrupt.
"""
try:
while 1:
time.sleep(.1)
except (KeyboardInterrupt, SystemExit):
self.stop()
def stop(self):
""" Stop all functions running in the thread handler."""
for run_event in self.run_events:
run_event.clear()
for thread in self.thread_pool:
thread.join()
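# --- Usage sketch (illustrative; not part of the original module) ---
# Functions passed to run() receive the run event as their last positional
# argument and should loop while it is set:
#   def worker(name, run_event):
#       while run_event.is_set():
#           print("working on", name)
#           time.sleep(1)
#   handler = ThreadHandler()  # assumes the Singleton base allows direct construction
#   handler.run(worker, args=("demo",))
#   handler.start_run_loop()   # Ctrl-C clears the run events and joins the threads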
|
monitor.py
|
#!/usr/bin/env python
# Copyright (c) 2020 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2 License
# The full license information can be found in LICENSE.txt
# in the root directory of this project.
"""
-App for Resource Monitoring.
-Collects System CPU/Memory as well as AXON CPU/Memory.
"""
import logging
import os
import psutil
import queue
import threading
import time
from axon.apps.base import app_registry, exposed, exposify, BaseApp
from axon.db.record import ResourceRecord
log = logging.getLogger(__name__)
@exposify
class ResourceMonitor(BaseApp):
NAME = 'RESOURCE_MONITOR'
def __init__(self, rqueue, interval=3, proc_name='runner'):
"""
A simple resource monitor that writes cpu / memory percentage
to wavefront at requested interval.
"""
self._rqueue = rqueue # records queue to put records onto.
self._interval = interval
self._switch = threading.Event()
self._proc_name = proc_name
self._thread = None
def _run(self):
p = psutil.Process(os.getpid())
while self._switch.is_set():
t = int(time.time())
sys_cpu_percent = round(psutil.cpu_percent(), 2)
sys_mem_percent = round(psutil.virtual_memory().percent, 2)
sys_net_conns = int(len(psutil.net_connections()))
axon_cpu_percent = round(p.cpu_percent(), 2)
axon_mem_percent = round(p.memory_percent(), 2)
axon_net_conns = int(len(p.connections()))
rec = ResourceRecord(sys_cpu_percent, sys_mem_percent,
sys_net_conns,
axon_cpu_percent, axon_mem_percent,
axon_net_conns)
try:
self._rqueue.put(rec, block=False, timeout=2)
except queue.Full:
log.error("Cann't put Resource record %r into the queue.",
rec)
time.sleep(self._interval)
@exposed
def is_running(self):
"""
        Returns True if resources are being monitored, else False.
"""
return self._thread and self._thread.is_alive()
@exposed
def stop(self):
"""
Stops Resource Monitoring.
"""
self._switch.clear()
if self.is_running():
self._thread.join()
self._thread = None
@exposed
def start(self):
"""
Starts Resource monitoring (in a separate thread)
"""
self._switch.set()
if not self._thread:
self._thread = threading.Thread(target=self._run)
self._thread.setDaemon(True)
self._thread.start()
def initialize(self):
self.start()
def shutdown(self):
self.stop()
app_registry[ResourceMonitor.NAME] = ResourceMonitor
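# --- Usage sketch (illustrative only; in axon the record queue and app
# --- registry wiring are provided by the surrounding application) ---
#   import queue as _queue
#   records = _queue.Queue(maxsize=1000)
#   monitor = ResourceMonitor(records, interval=1)
#   monitor.start()
#   time.sleep(5)
#   monitor.stop()
#   print('collected %d resource records' % records.qsize())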
|
test_app.py
|
import json
import random
import threading
import tornado.websocket
import tornado.gen
from tornado.testing import AsyncHTTPTestCase
from tornado.httpclient import HTTPError
from tornado.options import options
from tests.sshserver import run_ssh_server, banner
from tests.utils import encode_multipart_formdata, read_file, make_tests_data_path # noqa
from webssh import handler
from webssh.main import make_app, make_handlers
from webssh.settings import (
get_app_settings, get_server_settings, max_body_size
)
from webssh.utils import to_str
from webssh.worker import clients
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
handler.DELAY = 0.1
swallow_http_errors = handler.swallow_http_errors
class TestAppBase(AsyncHTTPTestCase):
def get_httpserver_options(self):
return get_server_settings(options)
def assert_response(self, bstr, response):
if swallow_http_errors:
self.assertEqual(response.code, 200)
self.assertIn(bstr, response.body)
else:
self.assertEqual(response.code, 400)
self.assertIn(b'Bad Request', response.body)
def assert_status_in(self, status, data):
self.assertIsNone(data['encoding'])
self.assertIsNone(data['id'])
self.assertIn(status, data['status'])
def assert_status_equal(self, status, data):
self.assertIsNone(data['encoding'])
self.assertIsNone(data['id'])
self.assertEqual(status, data['status'])
def assert_status_none(self, data):
self.assertIsNotNone(data['encoding'])
self.assertIsNotNone(data['id'])
self.assertIsNone(data['status'])
def fetch_request(self, url, method='GET', body='', headers={}, sync=True):
if not sync and url.startswith('/'):
url = self.get_url(url)
if isinstance(body, dict):
body = urlencode(body)
if not headers:
headers = self.headers
else:
headers.update(self.headers)
client = self if sync else self.get_http_client()
return client.fetch(url, method=method, body=body, headers=headers)
def sync_post(self, url, body, headers={}):
return self.fetch_request(url, 'POST', body, headers)
def async_post(self, url, body, headers={}):
return self.fetch_request(url, 'POST', body, headers, sync=False)
class TestAppBasic(TestAppBase):
running = [True]
sshserver_port = 2200
body = 'hostname=127.0.0.1&port={}&_xsrf=yummy&username=robey&password=foo'.format(sshserver_port) # noqa
headers = {'Cookie': '_xsrf=yummy'}
def get_app(self):
self.body_dict = {
'hostname': '127.0.0.1',
'port': str(self.sshserver_port),
'username': 'robey',
'password': '',
'_xsrf': 'yummy'
}
loop = self.io_loop
options.debug = False
options.policy = random.choice(['warning', 'autoadd'])
options.hostfile = ''
options.syshostfile = ''
options.tdstream = ''
app = make_app(make_handlers(loop, options), get_app_settings(options))
return app
@classmethod
def setUpClass(cls):
print('='*20)
t = threading.Thread(
target=run_ssh_server, args=(cls.sshserver_port, cls.running)
)
t.setDaemon(True)
t.start()
@classmethod
def tearDownClass(cls):
cls.running.pop()
print('='*20)
def test_app_with_invalid_form_for_missing_argument(self):
response = self.fetch('/')
self.assertEqual(response.code, 200)
body = 'port=7000&username=admin&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Missing argument hostname', response)
body = 'hostname=127.0.0.1&port=7000&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Missing argument username', response)
body = 'hostname=&port=&username=&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Missing value hostname', response)
body = 'hostname=127.0.0.1&port=7000&username=&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Missing value username', response)
def test_app_with_invalid_form_for_invalid_value(self):
body = 'hostname=127.0.0&port=22&username=&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Invalid hostname', response)
body = 'hostname=http://www.googe.com&port=22&username=&password&_xsrf=yummy' # noqa
response = self.sync_post('/', body)
self.assert_response(b'Invalid hostname', response)
body = 'hostname=127.0.0.1&port=port&username=&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Invalid port', response)
body = 'hostname=127.0.0.1&port=70000&username=&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Invalid port', response)
def test_app_with_wrong_hostname_ip(self):
body = 'hostname=127.0.0.2&port=2200&username=admin&_xsrf=yummy'
response = self.sync_post('/', body)
self.assertEqual(response.code, 200)
self.assertIn(b'Unable to connect to', response.body)
def test_app_with_wrong_hostname_domain(self):
body = 'hostname=xxxxxxxxxxxx&port=2200&username=admin&_xsrf=yummy'
response = self.sync_post('/', body)
self.assertEqual(response.code, 200)
self.assertIn(b'Unable to connect to', response.body)
def test_app_with_wrong_port(self):
body = 'hostname=127.0.0.1&port=7000&username=admin&_xsrf=yummy'
response = self.sync_post('/', body)
self.assertEqual(response.code, 200)
self.assertIn(b'Unable to connect to', response.body)
def test_app_with_wrong_credentials(self):
response = self.sync_post('/', self.body + 's')
self.assert_status_in('Authentication failed.', json.loads(to_str(response.body))) # noqa
def test_app_with_correct_credentials(self):
response = self.sync_post('/', self.body)
self.assert_status_none(json.loads(to_str(response.body)))
def test_app_with_correct_credentials_but_with_no_port(self):
default_port = handler.DEFAULT_PORT
handler.DEFAULT_PORT = self.sshserver_port
# with no port value
body = self.body.replace(str(self.sshserver_port), '')
response = self.sync_post('/', body)
self.assert_status_none(json.loads(to_str(response.body)))
# with no port argument
body = body.replace('port=&', '')
response = self.sync_post('/', body)
self.assert_status_none(json.loads(to_str(response.body)))
handler.DEFAULT_PORT = default_port
@tornado.testing.gen_test
def test_app_with_correct_credentials_timeout(self):
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
yield tornado.gen.sleep(handler.DELAY + 0.1)
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertIsNone(msg)
self.assertEqual(ws.close_reason, 'Websocket authentication failed.')
@tornado.testing.gen_test
def test_app_with_correct_credentials_but_ip_not_matched(self):
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
clients = handler.clients
handler.clients = {}
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertIsNone(msg)
self.assertEqual(ws.close_reason, 'Websocket authentication failed.')
handler.clients = clients
@tornado.testing.gen_test
def test_app_with_correct_credentials_user_robey(self):
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertEqual(to_str(msg, data['encoding']), banner)
ws.close()
@tornado.testing.gen_test
def test_app_with_correct_credentials_but_without_id_argument(self):
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws'
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertIsNone(msg)
self.assertIn('Missing argument id', ws.close_reason)
@tornado.testing.gen_test
def test_app_with_correct_credentials_but_empty_id(self):
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id='
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertIsNone(msg)
self.assertIn('Missing value id', ws.close_reason)
@tornado.testing.gen_test
def test_app_with_correct_credentials_but_wrong_id(self):
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=1' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertIsNone(msg)
self.assertIn('Websocket authentication failed', ws.close_reason)
@tornado.testing.gen_test
def test_app_with_correct_credentials_user_bar(self):
body = self.body.replace('robey', 'bar')
url = self.get_url('/')
response = yield self.async_post(url, body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertEqual(to_str(msg, data['encoding']), banner)
# messages below will be ignored silently
yield ws.write_message('hello')
yield ws.write_message('"hello"')
yield ws.write_message('[hello]')
yield ws.write_message(json.dumps({'resize': []}))
yield ws.write_message(json.dumps({'resize': {}}))
yield ws.write_message(json.dumps({'resize': 'ab'}))
yield ws.write_message(json.dumps({'resize': ['a', 'b']}))
yield ws.write_message(json.dumps({'resize': {'a': 1, 'b': 2}}))
yield ws.write_message(json.dumps({'resize': [100]}))
yield ws.write_message(json.dumps({'resize': [100]*10}))
yield ws.write_message(json.dumps({'resize': [-1, -1]}))
yield ws.write_message(json.dumps({'data': [1]}))
yield ws.write_message(json.dumps({'data': (1,)}))
yield ws.write_message(json.dumps({'data': {'a': 2}}))
yield ws.write_message(json.dumps({'data': 1}))
yield ws.write_message(json.dumps({'data': 2.1}))
yield ws.write_message(json.dumps({'key-non-existed': 'hello'}))
        # end - the messages above are just for testing webssh websocket stability
yield ws.write_message(json.dumps({'resize': [79, 23]}))
msg = yield ws.read_message()
self.assertEqual(b'resized', msg)
yield ws.write_message(json.dumps({'data': 'bye'}))
msg = yield ws.read_message()
self.assertEqual(b'bye', msg)
ws.close()
@tornado.testing.gen_test
def test_app_auth_with_valid_pubkey_by_urlencoded_form(self):
url = self.get_url('/')
privatekey = read_file(make_tests_data_path('user_rsa_key'))
self.body_dict.update(privatekey=privatekey)
response = yield self.async_post(url, self.body_dict)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertEqual(to_str(msg, data['encoding']), banner)
ws.close()
@tornado.testing.gen_test
def test_app_auth_with_valid_pubkey_by_multipart_form(self):
url = self.get_url('/')
privatekey = read_file(make_tests_data_path('user_rsa_key'))
files = [('privatekey', 'user_rsa_key', privatekey)]
content_type, body = encode_multipart_formdata(self.body_dict.items(),
files)
headers = {
'Content-Type': content_type, 'content-length': str(len(body))
}
response = yield self.async_post(url, body, headers=headers)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertEqual(to_str(msg, data['encoding']), banner)
ws.close()
@tornado.testing.gen_test
def test_app_auth_with_invalid_pubkey_for_user_robey(self):
url = self.get_url('/')
privatekey = 'h' * 1024
files = [('privatekey', 'user_rsa_key', privatekey)]
content_type, body = encode_multipart_formdata(self.body_dict.items(),
files)
headers = {
'Content-Type': content_type, 'content-length': str(len(body))
}
if swallow_http_errors:
response = yield self.async_post(url, body, headers=headers)
self.assertIn(b'Invalid key', response.body)
else:
with self.assertRaises(HTTPError) as ctx:
yield self.async_post(url, body, headers=headers)
self.assertIn('Bad Request', ctx.exception.message)
@tornado.testing.gen_test
def test_app_auth_with_pubkey_exceeds_key_max_size(self):
url = self.get_url('/')
privatekey = 'h' * (handler.PrivateKey.max_length + 1)
files = [('privatekey', 'user_rsa_key', privatekey)]
content_type, body = encode_multipart_formdata(self.body_dict.items(),
files)
headers = {
'Content-Type': content_type, 'content-length': str(len(body))
}
if swallow_http_errors:
response = yield self.async_post(url, body, headers=headers)
self.assertIn(b'Invalid key', response.body)
else:
with self.assertRaises(HTTPError) as ctx:
yield self.async_post(url, body, headers=headers)
self.assertIn('Bad Request', ctx.exception.message)
@tornado.testing.gen_test
def test_app_auth_with_pubkey_cannot_be_decoded_by_multipart_form(self):
url = self.get_url('/')
privatekey = 'h' * 1024
files = [('privatekey', 'user_rsa_key', privatekey)]
content_type, body = encode_multipart_formdata(self.body_dict.items(),
files)
body = body.encode('utf-8')
        # add some GBK bytes to the privatekey so that it cannot be decoded
body = body[:-100] + b'\xb4\xed\xce\xf3' + body[-100:]
headers = {
'Content-Type': content_type, 'content-length': str(len(body))
}
if swallow_http_errors:
response = yield self.async_post(url, body, headers=headers)
self.assertIn(b'Invalid unicode', response.body)
else:
with self.assertRaises(HTTPError) as ctx:
yield self.async_post(url, body, headers=headers)
self.assertIn('Bad Request', ctx.exception.message)
def test_app_post_form_with_large_body_size_by_multipart_form(self):
privatekey = 'h' * (2 * max_body_size)
files = [('privatekey', 'user_rsa_key', privatekey)]
content_type, body = encode_multipart_formdata(self.body_dict.items(),
files)
headers = {
'Content-Type': content_type, 'content-length': str(len(body))
}
response = self.sync_post('/', body, headers=headers)
self.assertIn(response.code, [400, 599])
def test_app_post_form_with_large_body_size_by_urlencoded_form(self):
privatekey = 'h' * (2 * max_body_size)
body = self.body + '&privatekey=' + privatekey
response = self.sync_post('/', body)
self.assertIn(response.code, [400, 599])
@tornado.testing.gen_test
def test_app_with_user_keyonly_for_bad_authentication_type(self):
self.body_dict.update(username='keyonly', password='foo')
response = yield self.async_post('/', self.body_dict)
self.assertEqual(response.code, 200)
self.assert_status_in('Bad authentication type', json.loads(to_str(response.body))) # noqa
@tornado.testing.gen_test
def test_app_with_user_pass2fa_with_correct_passwords(self):
self.body_dict.update(username='pass2fa', password='password',
totp='passcode')
response = yield self.async_post('/', self.body_dict)
self.assertEqual(response.code, 200)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
@tornado.testing.gen_test
def test_app_with_user_pass2fa_with_wrong_pkey_correct_passwords(self):
url = self.get_url('/')
privatekey = read_file(make_tests_data_path('user_rsa_key'))
self.body_dict.update(username='pass2fa', password='password',
privatekey=privatekey, totp='passcode')
response = yield self.async_post(url, self.body_dict)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
@tornado.testing.gen_test
def test_app_with_user_pkey2fa_with_correct_passwords(self):
url = self.get_url('/')
privatekey = read_file(make_tests_data_path('user_rsa_key'))
self.body_dict.update(username='pkey2fa', password='password',
privatekey=privatekey, totp='passcode')
response = yield self.async_post(url, self.body_dict)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
@tornado.testing.gen_test
def test_app_with_user_pkey2fa_with_wrong_password(self):
url = self.get_url('/')
privatekey = read_file(make_tests_data_path('user_rsa_key'))
self.body_dict.update(username='pkey2fa', password='wrongpassword',
privatekey=privatekey, totp='passcode')
response = yield self.async_post(url, self.body_dict)
data = json.loads(to_str(response.body))
self.assert_status_in('Authentication failed', data)
@tornado.testing.gen_test
def test_app_with_user_pkey2fa_with_wrong_passcode(self):
url = self.get_url('/')
privatekey = read_file(make_tests_data_path('user_rsa_key'))
self.body_dict.update(username='pkey2fa', password='password',
privatekey=privatekey, totp='wrongpasscode')
response = yield self.async_post(url, self.body_dict)
data = json.loads(to_str(response.body))
self.assert_status_in('Authentication failed', data)
@tornado.testing.gen_test
def test_app_with_user_pkey2fa_with_empty_passcode(self):
url = self.get_url('/')
privatekey = read_file(make_tests_data_path('user_rsa_key'))
self.body_dict.update(username='pkey2fa', password='password',
privatekey=privatekey, totp='')
response = yield self.async_post(url, self.body_dict)
data = json.loads(to_str(response.body))
self.assert_status_in('Need a verification code', data)
class OtherTestBase(TestAppBase):
sshserver_port = 3300
headers = {'Cookie': '_xsrf=yummy'}
debug = False
policy = None
xsrf = True
hostfile = ''
syshostfile = ''
tdstream = ''
maxconn = 20
origin = 'same'
encodings = []
body = {
'hostname': '127.0.0.1',
'port': '',
'username': 'robey',
'password': 'foo',
'_xsrf': 'yummy'
}
def get_app(self):
self.body.update(port=str(self.sshserver_port))
loop = self.io_loop
options.debug = self.debug
options.xsrf = self.xsrf
options.policy = self.policy if self.policy else random.choice(['warning', 'autoadd']) # noqa
options.hostfile = self.hostfile
options.syshostfile = self.syshostfile
options.tdstream = self.tdstream
options.maxconn = self.maxconn
options.origin = self.origin
app = make_app(make_handlers(loop, options), get_app_settings(options))
return app
def setUp(self):
print('='*20)
self.running = True
OtherTestBase.sshserver_port += 1
t = threading.Thread(
target=run_ssh_server,
args=(self.sshserver_port, self.running, self.encodings)
)
t.setDaemon(True)
t.start()
super(OtherTestBase, self).setUp()
def tearDown(self):
self.running = False
print('='*20)
super(OtherTestBase, self).tearDown()
class TestAppInDebugMode(OtherTestBase):
debug = True
def assert_response(self, bstr, response):
if swallow_http_errors:
self.assertEqual(response.code, 200)
self.assertIn(bstr, response.body)
else:
self.assertEqual(response.code, 500)
self.assertIn(b'Uncaught exception', response.body)
def test_server_error_for_post_method(self):
body = dict(self.body, error='raise')
response = self.sync_post('/', body)
self.assert_response(b'"status": "Internal Server Error"', response)
def test_html(self):
response = self.fetch('/', method='GET')
self.assertIn(b'novalidate>', response.body)
class TestAppWithLargeBuffer(OtherTestBase):
@tornado.testing.gen_test
def test_app_for_sending_message_with_large_size(self):
url = self.get_url('/')
response = yield self.async_post(url, dict(self.body, username='foo'))
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertEqual(to_str(msg, data['encoding']), banner)
send = 'h' * (64 * 1024) + '\r\n\r\n'
yield ws.write_message(json.dumps({'data': send}))
lst = []
while True:
msg = yield ws.read_message()
lst.append(msg)
if msg.endswith(b'\r\n\r\n'):
break
recv = b''.join(lst).decode(data['encoding'])
self.assertEqual(send, recv)
ws.close()
class TestAppWithRejectPolicy(OtherTestBase):
policy = 'reject'
hostfile = make_tests_data_path('known_hosts_example')
@tornado.testing.gen_test
def test_app_with_hostname_not_in_hostkeys(self):
response = yield self.async_post('/', self.body)
data = json.loads(to_str(response.body))
message = 'Connection to {}:{} is not allowed.'.format(self.body['hostname'], self.sshserver_port) # noqa
self.assertEqual(message, data['status'])
class TestAppWithBadHostKey(OtherTestBase):
policy = random.choice(['warning', 'autoadd', 'reject'])
hostfile = make_tests_data_path('test_known_hosts')
def setUp(self):
self.sshserver_port = 2222
super(TestAppWithBadHostKey, self).setUp()
@tornado.testing.gen_test
def test_app_with_bad_host_key(self):
response = yield self.async_post('/', self.body)
data = json.loads(to_str(response.body))
self.assertEqual('Bad host key.', data['status'])
class TestAppWithTrustedStream(OtherTestBase):
tdstream = '127.0.0.2'
def test_with_forbidden_get_request(self):
response = self.fetch('/', method='GET')
self.assertEqual(response.code, 403)
self.assertIn('Forbidden', response.error.message)
def test_with_forbidden_post_request(self):
response = self.sync_post('/', self.body)
self.assertEqual(response.code, 403)
self.assertIn('Forbidden', response.error.message)
def test_with_forbidden_put_request(self):
response = self.fetch_request('/', method='PUT', body=self.body)
self.assertEqual(response.code, 403)
self.assertIn('Forbidden', response.error.message)
class TestAppNotFoundHandler(OtherTestBase):
custom_headers = handler.MixinHandler.custom_headers
def test_with_not_found_get_request(self):
response = self.fetch('/pathnotfound', method='GET')
self.assertEqual(response.code, 404)
self.assertEqual(
response.headers['Server'], self.custom_headers['Server']
)
self.assertIn(b'404: Not Found', response.body)
def test_with_not_found_post_request(self):
response = self.sync_post('/pathnotfound', self.body)
self.assertEqual(response.code, 404)
self.assertEqual(
response.headers['Server'], self.custom_headers['Server']
)
self.assertIn(b'404: Not Found', response.body)
def test_with_not_found_put_request(self):
response = self.fetch_request('/pathnotfound', method='PUT',
body=self.body)
self.assertEqual(response.code, 404)
self.assertEqual(
response.headers['Server'], self.custom_headers['Server']
)
self.assertIn(b'404: Not Found', response.body)
class TestAppWithHeadRequest(OtherTestBase):
def test_with_index_path(self):
response = self.fetch('/', method='HEAD')
self.assertEqual(response.code, 200)
def test_with_ws_path(self):
response = self.fetch('/ws', method='HEAD')
self.assertEqual(response.code, 405)
def test_with_not_found_path(self):
response = self.fetch('/notfound', method='HEAD')
self.assertEqual(response.code, 404)
class TestAppWithPutRequest(OtherTestBase):
xsrf = False
@tornado.testing.gen_test
def test_app_with_method_not_supported(self):
with self.assertRaises(HTTPError) as ctx:
yield self.fetch_request('/', 'PUT', self.body, sync=False)
self.assertIn('Method Not Allowed', ctx.exception.message)
class TestAppWithTooManyConnections(OtherTestBase):
maxconn = 1
def setUp(self):
clients.clear()
super(TestAppWithTooManyConnections, self).setUp()
@tornado.testing.gen_test
def test_app_with_too_many_connections(self):
clients['127.0.0.1'] = {'fake_worker_id': None}
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assertEqual('Too many live connections.', data['status'])
clients['127.0.0.1'].clear()
response = yield self.async_post(url, self.body)
self.assert_status_none(json.loads(to_str(response.body)))
class TestAppWithCrossOriginOperation(OtherTestBase):
origin = 'http://www.example.com'
@tornado.testing.gen_test
def test_app_with_wrong_event_origin(self):
body = dict(self.body, _origin='localhost')
response = yield self.async_post('/', body)
self.assert_status_equal('Cross origin operation is not allowed.', json.loads(to_str(response.body))) # noqa
@tornado.testing.gen_test
def test_app_with_wrong_header_origin(self):
headers = dict(Origin='localhost')
response = yield self.async_post('/', self.body, headers=headers)
self.assert_status_equal('Cross origin operation is not allowed.', json.loads(to_str(response.body)), ) # noqa
@tornado.testing.gen_test
def test_app_with_correct_event_origin(self):
body = dict(self.body, _origin=self.origin)
response = yield self.async_post('/', body)
self.assert_status_none(json.loads(to_str(response.body)))
self.assertIsNone(response.headers.get('Access-Control-Allow-Origin'))
@tornado.testing.gen_test
def test_app_with_correct_header_origin(self):
headers = dict(Origin=self.origin)
response = yield self.async_post('/', self.body, headers=headers)
self.assert_status_none(json.loads(to_str(response.body)))
self.assertEqual(
response.headers.get('Access-Control-Allow-Origin'), self.origin
)
class TestAppWithBadEncoding(OtherTestBase):
encodings = [u'\u7f16\u7801']
@tornado.testing.gen_test
def test_app_with_a_bad_encoding(self):
response = yield self.async_post('/', self.body)
dic = json.loads(to_str(response.body))
self.assert_status_none(dic)
self.assertIn(dic['encoding'], ['UTF-8', 'GBK'])
class TestAppWithUnknownEncoding(OtherTestBase):
encodings = [u'\u7f16\u7801', u'UnknownEncoding']
@tornado.testing.gen_test
def test_app_with_a_unknown_encoding(self):
response = yield self.async_post('/', self.body)
self.assert_status_none(json.loads(to_str(response.body)))
dic = json.loads(to_str(response.body))
self.assert_status_none(dic)
self.assertEqual(dic['encoding'], 'utf-8')
|
main.py
|
import redis
from sys import argv
from utils.modelmanager import ModelManager
from utils.phrasegenerator import PhraseGenerator
from threading import Thread
host = ('host', argv[argv.index('--redis-host') + 1]) if '--redis-host' in argv else None
port = ('port', int(argv[argv.index('--redis-port') + 1])) if '--redis-port' in argv else None
db = ('db', int(argv[argv.index('--redis-db') + 1])) if '--redis-db' in argv else None
modelonly = '--model-only' in argv
generatoronly = '--generator-only' in argv
redisargs = dict([arg for arg in [host, port, db] if arg])
r = redis.Redis(**redisargs)
mmgr = Thread(target=ModelManager(r).startpolling)
pgen = Thread(target=PhraseGenerator(r).startgenerating)
if __name__ == '__main__':
if not generatoronly:
mmgr.start()
if not modelonly:
pgen.start()
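# Example invocations (illustrative; the flags are the ones parsed above):
#   python main.py --redis-host localhost --redis-port 6379 --redis-db 0
#   python main.py --model-only      # only run the model-polling thread
#   python main.py --generator-only  # only run the phrase-generator thread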
|
pos.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
This module was created on 2018/7/9.
In this module we implement PoS (proof of stake) in Python.
"""
import time
import json
import threading
from hashlib import sha256
from datetime import datetime
from random import choice
from queue import Queue, Empty
from socketserver import BaseRequestHandler, ThreadingTCPServer
# we need two queues for inter-thread communication
# module-level shared state
block_chain = []
temp_blocks = []
candidate_blocks = Queue()  # create a queue used for inter-thread communication
announcements = Queue()
validators = {}
My_Lock = threading.Lock()
def generate_block(oldblock, bpm, address):
"""
:param oldblock:
:param bpm:
:param address:
:return:
"""
newblock = {
"Index": oldblock["Index"] + 1,
"BPM": bpm,
"Timestamp": str(datetime.now()),
"PrevHash": oldblock["Hash"],
"Validator": address
}
newblock["Hash"] = calculate_hash(newblock)
return newblock
def calculate_hash(block):
record = "".join([
str(block["Index"]),
str(block["BPM"]),
block["Timestamp"],
block["PrevHash"]
])
return sha256(record.encode()).hexdigest()
def is_block_valid(newblock, oldblock):
"""
:param newblock:
:param oldblock:
:return:
"""
if oldblock["Index"] + 1 != newblock["Index"]:
return False
if oldblock["Hash"] != newblock["PrevHash"]:
return False
if calculate_hash(newblock) != newblock["Hash"]:
return False
return True
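# --- Worked example (illustrative comment, not executed) ---
# Chaining two blocks with the helpers above:
#   genesis = {"Index": 0, "BPM": 0, "Timestamp": str(datetime.now()),
#              "PrevHash": "", "Validator": ""}
#   genesis["Hash"] = calculate_hash(genesis)
#   nxt = generate_block(genesis, bpm=72, address="validator-1")
#   assert is_block_valid(nxt, genesis)  # Index, PrevHash and Hash all line up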
def pick_winner(announcements):
"""
    Pick the validator who wins the right to forge the next block.
:param announcements:
:return:
"""
time.sleep(10)
while True:
with My_Lock:
temp = temp_blocks
lottery_pool = [] #
if temp:
for block in temp:
if block["Validator"] not in lottery_pool:
set_validators = validators
k = set_validators.get(block["Validator"])
if k:
for i in range(k):
lottery_pool.append(block["Validator"])
lottery_winner = choice(lottery_pool)
print(lottery_winner)
            # add the winner's block to the blockchain and let all the other nodes know
for block in temp:
if block["Validator"] == lottery_winner:
with My_Lock:
block_chain.append(block)
# write message in queue.
msg = "\n{0} 赢得了记账权利\n".format(lottery_winner)
announcements.put(msg)
break
with My_Lock:
temp_blocks.clear()
class HandleConn(BaseRequestHandler):
def handle(self):
print("Got connection from", self.client_address)
# validator address
self.request.send(b"Enter token balance:")
balance = self.request.recv(8192)
try:
balance = int(balance)
except Exception as e:
print(e)
t = str(datetime.now())
address = sha256(t.encode()).hexdigest()
validators[address] = balance
print(validators)
        # Start a single background thread that pushes winner announcements
        # to this client; starting it inside the loop below would spawn a
        # new thread on every iteration.
        announce_winner_t = threading.Thread(target=annouce_winner, args=(announcements, self.request,),
                                             daemon=True)
        announce_winner_t.start()
        while True:
self.request.send(b"\nEnter a new BPM:")
bpm = self.request.recv(8192)
try:
bpm = int(bpm)
except Exception as e:
print(e)
del validators[address]
break
# with My_Lock:
last_block = block_chain[-1]
new_block = generate_block(last_block, bpm, address)
if is_block_valid(new_block, last_block):
print("new block is valid!")
candidate_blocks.put(new_block)
self.request.send(b"\nEnter a new BPM:\n")
annouce_blockchain_t = threading.Thread(target=annouce_blockchain, args=(self.request,), daemon=True)
annouce_blockchain_t.start()
def annouce_winner(announcements, request):
"""
:param announcements:
:param request:
:return:
"""
while True:
try:
msg = announcements.get(block=False)
request.send(msg.encode())
request.send(b'\n')
except Empty:
time.sleep(3)
continue
def annouce_blockchain(request):
"""
:param request:
:return:
"""
while True:
time.sleep(30)
with My_Lock:
output = json.dumps(block_chain)
try:
request.send(output.encode())
request.send(b'\n')
except OSError:
pass
def candidate(candidate_blocks):
"""
:param candidate_blocks:
:return:
"""
while True:
try:
candi = candidate_blocks.get(block=False)
except Empty:
time.sleep(5)
continue
temp_blocks.append(candi)
def run():
# create a genesis block
t = str(datetime.now())
genesis_block = {
"Index": 0,
"Timestamp": t,
"BPM": 0,
"PrevHash": "",
"Validator": ""
}
genesis_block["Hash"] = calculate_hash(genesis_block)
print(genesis_block)
block_chain.append(genesis_block)
thread_canditate = threading.Thread(target=candidate, args=(candidate_blocks,), daemon=True)
thread_pick = threading.Thread(target=pick_winner, args=(announcements,), daemon=True)
thread_canditate.start()
thread_pick.start()
# start a tcp server
serv = ThreadingTCPServer(('', 9090), HandleConn)
serv.serve_forever()
if __name__ == '__main__':
run()
|
__init__.py
|
from socketserver import ThreadingUDPServer, DatagramRequestHandler
from socketserver import ThreadingTCPServer, BaseRequestHandler, StreamRequestHandler
from threading import Thread
from types import FunctionType
__name__ = 'PortableServers'
__version__ = '1.0.0'
__author__ = 'Ren'
__description__ = 'Portable servers for everyone! :P'
__url__ = 'https://steamcommunity.com/id/SamXDR/'
__all__ = [
'SocketServerUDP',
'SocketServerTCP',
'StreamSocketServerTCP',
]
class SocketServerUDP(ThreadingUDPServer):
class __Request(DatagramRequestHandler):
def setup(self):
super().setup()
self.selfmain.setup(self)
def handle(self):
super().handle()
self.selfmain.handle(self)
def finish(self):
super().finish()
self.selfmain.finish(self)
def __init__(self, addr, **kwargs):
self.thread = Thread(target=self.serve_forever, daemon=True)
self.__Request.selfmain = self
self.setup = FunctionType(self.setup.__code__, self.setup.__globals__)
self.handle = FunctionType(self.handle.__code__, self.handle.__globals__)
self.finish = FunctionType(self.finish.__code__, self.finish.__globals__)
super().__init__(addr, self.__Request, True)
def start(self):
self.thread.start()
    def stop(self):
        # threading.Thread has no stop(); shutting the server down makes
        # serve_forever() return and the background thread exit.
        self.shutdown()
    def IsAlive(self):
        return self.thread.is_alive()
def __GetDaemon(self) -> bool:
return self.thread.daemon
def __SetDaemon(self, daemonic:bool):
if isinstance(daemonic, bool) != True:
return
self.thread.daemon = daemonic
def __DelDaemon(self):
return
daemon = property(__GetDaemon, __SetDaemon, __DelDaemon)
def setup(self):
pass
def handle(self):
pass
def finish(self):
pass
class SocketServerTCP(ThreadingTCPServer):
class __Request(BaseRequestHandler):
def setup(self):
super().setup()
self.selfmain.setup(self)
def handle(self):
super().handle()
self.selfmain.handle(self)
def finish(self):
super().finish()
self.selfmain.finish(self)
def __init__(self, addr, **kwargs):
self.thread = Thread(target=self.serve_forever, daemon=True)
self.__Request.selfmain = self
self.setup = FunctionType(self.setup.__code__, self.setup.__globals__)
self.handle = FunctionType(self.handle.__code__, self.handle.__globals__)
self.finish = FunctionType(self.finish.__code__, self.finish.__globals__)
super().__init__(addr, self.__Request, True)
def start(self):
self.thread.start()
    def stop(self):
        # threading.Thread has no stop(); shutting the server down makes
        # serve_forever() return and the background thread exit.
        self.shutdown()
    def IsAlive(self):
        return self.thread.is_alive()
def __GetDaemon(self) -> bool:
return self.thread.daemon
def __SetDaemon(self, daemonic:bool):
if isinstance(daemonic, bool) != True:
return
self.thread.daemon = daemonic
def __DelDaemon(self):
return
daemon = property(__GetDaemon, __SetDaemon, __DelDaemon)
def setup(self):
pass
def handle(self):
pass
def finish(self):
pass
class StreamSocketServerTCP(ThreadingTCPServer):
class __Request(StreamRequestHandler):
def setup(self):
super().setup()
self.selfmain.setup(self)
def handle(self):
super().handle()
self.selfmain.handle(self)
def finish(self):
super().finish()
self.selfmain.finish(self)
def __init__(self, addr, **kwargs):
self.thread = Thread(target=self.serve_forever, daemon=True)
self.__Request.selfmain = self
self.setup = FunctionType(self.setup.__code__, self.setup.__globals__)
self.handle = FunctionType(self.handle.__code__, self.handle.__globals__)
self.finish = FunctionType(self.finish.__code__, self.finish.__globals__)
super().__init__(addr, self.__Request, True)
def start(self):
self.thread.start()
    def stop(self):
        # threading.Thread has no stop(); shutting the server down makes
        # serve_forever() return and the background thread exit.
        self.shutdown()
    def IsAlive(self):
        return self.thread.is_alive()
def __GetDaemon(self) -> bool:
return self.thread.daemon
def __SetDaemon(self, daemonic:bool):
if isinstance(daemonic, bool) != True:
return
self.thread.daemon = daemonic
def __DelDaemon(self):
return
daemon = property(__GetDaemon, __SetDaemon, __DelDaemon)
def setup(self):
pass
def handle(self):
pass
def finish(self):
pass
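# --- Usage sketch (speculative; inferred from how __Request forwards the
# --- per-connection handler into the server-level setup/handle/finish hooks) ---
#   def echo_handle(request_handler):
#       data = request_handler.request.recv(1024)
#       request_handler.request.sendall(data)
#   server = SocketServerTCP(('127.0.0.1', 9000))
#   server.handle = echo_handle  # replace the default no-op handle
#   server.start()               # serve_forever() runs on the background thread
#   server.stop()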
|
main.py
|
import sys
import threading
from Coach import Coach
from chessaz.ChessGame import ChessGame as Game
from chessaz.pytorch.NNet import NNetWrapper as nn
from utils import *
args = dotdict({
'numIters': 5,
'numEps': 2, # Number of complete self-play games to simulate during a new iteration.
'tempThreshold': 15, #
'updateThreshold': 0.5, # During arena playoff, new neural net will be accepted if threshold or more of games are won.
'maxlenOfQueue': 200000, # Number of game examples to train the neural networks.
    'numMCTSSims': 2, # Number of game moves for MCTS to simulate.
'arenaCompare': 2, # Number of games to play during arena play to determine if new net will be accepted.
'cpuct': 1,
'checkpoint': '/home/john/PycharmProjects/ift-6756_RL_project/training/',
'load_model': False,
'load_folder_file': ('/home/john/PycharmProjects/ift-6756_RL_project/checkpoint/', 'checkpoint.pth.tar'),
'numItersForTrainExamplesHistory': 20,
})
if __name__ == "__main__":
my_game = Game()
nnet = nn(my_game)
if args.load_model:
nnet.load_checkpoint(args.load_folder_file[0], args.load_folder_file[1])
c = Coach(my_game, nnet, args)
if args.load_model:
print("Load trainExamples from file")
c.loadTrainExamples()
sys.setrecursionlimit(100000)
# threading.stack_size(200000000)
# thread = threading.Thread(target=c.learn())
# thread.start()
c.learn()
|
interpreter.py
|
# Copyright 2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This class contains the basic functionality needed to run any interpreter
# or an interpreter-based tool.
from .common import CMakeException, CMakeTarget
from .client import CMakeClient, RequestCMakeInputs, RequestConfigure, RequestCompute, RequestCodeModel
from .fileapi import CMakeFileAPI
from .executor import CMakeExecutor
from .traceparser import CMakeTraceParser, CMakeGeneratorTarget
from .. import mlog
from ..environment import Environment
from ..mesonlib import MachineChoice, version_compare
from ..compilers.compilers import lang_suffixes, header_suffixes, obj_suffixes, lib_suffixes, is_header
from subprocess import Popen, PIPE
from threading import Thread
from enum import Enum
from functools import lru_cache
import typing as T
import os, re
from ..mparser import (
Token,
BaseNode,
CodeBlockNode,
FunctionNode,
ArrayNode,
ArgumentNode,
AssignmentNode,
BooleanNode,
StringNode,
IdNode,
IndexNode,
MethodNode,
NumberNode,
)
if T.TYPE_CHECKING:
from ..build import Build
from ..backend.backends import Backend
# Disable all warnings automatically enabled with --trace and friends
# See https://cmake.org/cmake/help/latest/variable/CMAKE_POLICY_WARNING_CMPNNNN.html
disable_policy_warnings = [
'CMP0025',
'CMP0047',
'CMP0056',
'CMP0060',
'CMP0065',
'CMP0066',
'CMP0067',
'CMP0082',
'CMP0089',
]
backend_generator_map = {
'ninja': 'Ninja',
'xcode': 'Xcode',
'vs2010': 'Visual Studio 10 2010',
'vs2015': 'Visual Studio 15 2017',
'vs2017': 'Visual Studio 15 2017',
'vs2019': 'Visual Studio 16 2019',
}
language_map = {
'c': 'C',
'cpp': 'CXX',
'cuda': 'CUDA',
'cs': 'CSharp',
'java': 'Java',
'fortran': 'Fortran',
'swift': 'Swift',
}
target_type_map = {
'STATIC_LIBRARY': 'static_library',
'MODULE_LIBRARY': 'shared_module',
'SHARED_LIBRARY': 'shared_library',
'EXECUTABLE': 'executable',
'OBJECT_LIBRARY': 'static_library',
'INTERFACE_LIBRARY': 'header_only'
}
skip_targets = ['UTILITY']
blacklist_compiler_flags = [
'-Wall', '-Wextra', '-Weverything', '-Werror', '-Wpedantic', '-pedantic', '-w',
'/W1', '/W2', '/W3', '/W4', '/Wall', '/WX', '/w',
'/O1', '/O2', '/Ob', '/Od', '/Og', '/Oi', '/Os', '/Ot', '/Ox', '/Oy', '/Ob0',
'/RTC1', '/RTCc', '/RTCs', '/RTCu',
'/Z7', '/Zi', '/ZI',
]
blacklist_link_flags = [
'/machine:x64', '/machine:x86', '/machine:arm', '/machine:ebc',
'/debug', '/debug:fastlink', '/debug:full', '/debug:none',
'/incremental',
]
blacklist_clang_cl_link_flags = ['/GR', '/EHsc', '/MDd', '/Zi', '/RTC1']
blacklist_link_libs = [
'kernel32.lib',
'user32.lib',
'gdi32.lib',
'winspool.lib',
'shell32.lib',
'ole32.lib',
'oleaut32.lib',
'uuid.lib',
'comdlg32.lib',
'advapi32.lib'
]
generated_target_name_prefix = 'cm_'
transfer_dependencies_from = ['header_only']
class OutputTargetMap:
rm_so_version = re.compile(r'(\.[0-9]+)+$')
def __init__(self, build_dir: str):
self.tgt_map = {}
self.build_dir = build_dir
def add(self, tgt: T.Union['ConverterTarget', 'ConverterCustomTarget']) -> None:
def assign_keys(keys: T.List[str]) -> None:
for i in [x for x in keys if x]:
self.tgt_map[i] = tgt
keys = [self._target_key(tgt.cmake_name)]
if isinstance(tgt, ConverterTarget):
keys += [tgt.full_name]
keys += [self._rel_artifact_key(x) for x in tgt.artifacts]
keys += [self._base_artifact_key(x) for x in tgt.artifacts]
if isinstance(tgt, ConverterCustomTarget):
keys += [self._rel_generated_file_key(x) for x in tgt.original_outputs]
keys += [self._base_generated_file_key(x) for x in tgt.original_outputs]
assign_keys(keys)
def _return_first_valid_key(self, keys: T.List[str]) -> T.Optional[T.Union['ConverterTarget', 'ConverterCustomTarget']]:
for i in keys:
if i and i in self.tgt_map:
return self.tgt_map[i]
return None
def target(self, name: str) -> T.Optional[T.Union['ConverterTarget', 'ConverterCustomTarget']]:
return self._return_first_valid_key([self._target_key(name)])
def artifact(self, name: str) -> T.Optional[T.Union['ConverterTarget', 'ConverterCustomTarget']]:
keys = []
candidates = [name, OutputTargetMap.rm_so_version.sub('', name)]
for i in lib_suffixes:
if not name.endswith('.' + i):
continue
new_name = name[:-len(i) - 1]
new_name = OutputTargetMap.rm_so_version.sub('', new_name)
candidates += ['{}.{}'.format(new_name, i)]
for i in candidates:
keys += [self._rel_artifact_key(i), os.path.basename(i), self._base_artifact_key(i)]
return self._return_first_valid_key(keys)
def generated(self, name: str) -> T.Optional[T.Union['ConverterTarget', 'ConverterCustomTarget']]:
return self._return_first_valid_key([self._rel_generated_file_key(name), self._base_generated_file_key(name)])
# Utility functions to generate local keys
def _rel_path(self, fname: str) -> T.Optional[str]:
fname = os.path.normpath(os.path.join(self.build_dir, fname))
if os.path.commonpath([self.build_dir, fname]) != self.build_dir:
return None
return os.path.relpath(fname, self.build_dir)
def _target_key(self, tgt_name: str) -> str:
return '__tgt_{}__'.format(tgt_name)
def _rel_generated_file_key(self, fname: str) -> T.Optional[str]:
path = self._rel_path(fname)
return '__relgen_{}__'.format(path) if path else None
def _base_generated_file_key(self, fname: str) -> str:
return '__gen_{}__'.format(os.path.basename(fname))
def _rel_artifact_key(self, fname: str) -> T.Optional[str]:
path = self._rel_path(fname)
return '__relart_{}__'.format(path) if path else None
def _base_artifact_key(self, fname: str) -> str:
return '__art_{}__'.format(os.path.basename(fname))
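    # Illustrative examples of the generated keys (comment added for clarity;
    # assuming a CMake target named "foo" that produces "libfoo.so.1.2"):
    #   _target_key('foo')                  -> '__tgt_foo__'
    #   _base_generated_file_key('a/b.c')   -> '__gen_b.c__'
    #   _base_artifact_key('libfoo.so.1.2') -> '__art_libfoo.so.1.2__'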
class ConverterTarget:
def __init__(self, target: CMakeTarget, env: Environment):
self.env = env
self.artifacts = target.artifacts
self.src_dir = target.src_dir
self.build_dir = target.build_dir
self.name = target.name
self.cmake_name = target.name
self.full_name = target.full_name
self.type = target.type
self.install = target.install
self.install_dir = ''
self.link_libraries = target.link_libraries
self.link_flags = target.link_flags + target.link_lang_flags
self.depends_raw = []
self.depends = []
if target.install_paths:
self.install_dir = target.install_paths[0]
self.languages = []
self.sources = []
self.generated = []
self.includes = []
self.sys_includes = []
self.link_with = []
self.object_libs = []
self.compile_opts = {}
self.public_compile_opts = []
self.pie = False
# Project default override options (c_std, cpp_std, etc.)
self.override_options = []
# Convert the target name to a valid meson target name
self.name = self.name.replace('-', '_')
self.name = generated_target_name_prefix + self.name
for i in target.files:
# Determine the meson language
lang_cmake_to_meson = {val.lower(): key for key, val in language_map.items()}
lang = lang_cmake_to_meson.get(i.language.lower(), 'c')
if lang not in self.languages:
self.languages += [lang]
if lang not in self.compile_opts:
self.compile_opts[lang] = []
# Add arguments, but avoid duplicates
args = i.flags
args += ['-D{}'.format(x) for x in i.defines]
self.compile_opts[lang] += [x for x in args if x not in self.compile_opts[lang]]
# Handle include directories
self.includes += [x['path'] for x in i.includes if x not in self.includes and not x['isSystem']]
self.sys_includes += [x['path'] for x in i.includes if x not in self.sys_includes and x['isSystem']]
# Add sources to the right array
if i.is_generated:
self.generated += i.sources
else:
self.sources += i.sources
def __repr__(self) -> str:
return '<{}: {}>'.format(self.__class__.__name__, self.name)
std_regex = re.compile(r'([-]{1,2}std=|/std:v?|[-]{1,2}std:)(.*)')
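    # Examples of flags matched by std_regex (illustrative comment): a flag
    # like '-std=c++14' in the C++ options yields 'cpp_std=c++14', and
    # '/std:c17' in the C options yields 'c_std=c17' in override_options.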
def postprocess(self, output_target_map: OutputTargetMap, root_src_dir: str, subdir: str, install_prefix: str, trace: CMakeTraceParser) -> None:
# Detect setting the C and C++ standard
for i in ['c', 'cpp']:
if i not in self.compile_opts:
continue
temp = []
for j in self.compile_opts[i]:
m = ConverterTarget.std_regex.match(j)
if m:
self.override_options += ['{}_std={}'.format(i, m.group(2))]
elif j in ['-fPIC', '-fpic', '-fPIE', '-fpie']:
self.pie = True
elif j in blacklist_compiler_flags:
pass
else:
temp += [j]
self.compile_opts[i] = temp
# Make sure to force enable -fPIC for OBJECT libraries
if self.type.upper() == 'OBJECT_LIBRARY':
self.pie = True
# Use the CMake trace, if required
tgt = trace.targets.get(self.cmake_name)
if tgt:
self.depends_raw = trace.targets[self.cmake_name].depends
if self.type.upper() == 'INTERFACE_LIBRARY':
props = tgt.properties
self.includes += props.get('INTERFACE_INCLUDE_DIRECTORIES', [])
self.public_compile_opts += props.get('INTERFACE_COMPILE_DEFINITIONS', [])
self.public_compile_opts += props.get('INTERFACE_COMPILE_OPTIONS', [])
self.link_flags += props.get('INTERFACE_LINK_OPTIONS', [])
# TODO refactor this copy paste from CMakeDependency for future releases
reg_is_lib = re.compile(r'^(-l[a-zA-Z0-9_]+|-l?pthread)$')
to_process = [self.cmake_name]
processed = []
while len(to_process) > 0:
curr = to_process.pop(0)
if curr in processed or curr not in trace.targets:
continue
tgt = trace.targets[curr]
cfgs = []
cfg = ''
otherDeps = []
libraries = []
mlog.debug(tgt)
if 'INTERFACE_COMPILE_DEFINITIONS' in tgt.properties:
self.public_compile_opts += ['-D' + re.sub('^-D', '', x) for x in tgt.properties['INTERFACE_COMPILE_DEFINITIONS'] if x]
if 'INTERFACE_COMPILE_OPTIONS' in tgt.properties:
self.public_compile_opts += [x for x in tgt.properties['INTERFACE_COMPILE_OPTIONS'] if x]
if 'IMPORTED_CONFIGURATIONS' in tgt.properties:
cfgs += [x for x in tgt.properties['IMPORTED_CONFIGURATIONS'] if x]
cfg = cfgs[0]
if 'CONFIGURATIONS' in tgt.properties:
cfgs += [x for x in tgt.properties['CONFIGURATIONS'] if x]
cfg = cfgs[0]
if 'RELEASE' in cfgs:
cfg = 'RELEASE'
if 'IMPORTED_IMPLIB_{}'.format(cfg) in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_IMPLIB_{}'.format(cfg)] if x]
elif 'IMPORTED_IMPLIB' in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_IMPLIB'] if x]
elif 'IMPORTED_LOCATION_{}'.format(cfg) in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_LOCATION_{}'.format(cfg)] if x]
elif 'IMPORTED_LOCATION' in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_LOCATION'] if x]
if 'LINK_LIBRARIES' in tgt.properties:
otherDeps += [x for x in tgt.properties['LINK_LIBRARIES'] if x]
if 'INTERFACE_LINK_LIBRARIES' in tgt.properties:
otherDeps += [x for x in tgt.properties['INTERFACE_LINK_LIBRARIES'] if x]
if 'IMPORTED_LINK_DEPENDENT_LIBRARIES_{}'.format(cfg) in tgt.properties:
otherDeps += [x for x in tgt.properties['IMPORTED_LINK_DEPENDENT_LIBRARIES_{}'.format(cfg)] if x]
elif 'IMPORTED_LINK_DEPENDENT_LIBRARIES' in tgt.properties:
otherDeps += [x for x in tgt.properties['IMPORTED_LINK_DEPENDENT_LIBRARIES'] if x]
for j in otherDeps:
if j in trace.targets:
to_process += [j]
elif reg_is_lib.match(j) or os.path.exists(j):
libraries += [j]
for j in libraries:
if j not in self.link_libraries:
self.link_libraries += [j]
processed += [curr]
elif self.type.upper() not in ['EXECUTABLE', 'OBJECT_LIBRARY']:
mlog.warning('CMake: Target', mlog.bold(self.cmake_name), 'not found in CMake trace. This can lead to build errors')
temp = []
for i in self.link_libraries:
# Let meson handle this arcane magic
if ',-rpath,' in i:
continue
if not os.path.isabs(i):
link_with = output_target_map.artifact(i)
if link_with:
self.link_with += [link_with]
continue
temp += [i]
self.link_libraries = temp
# Filter out files that are not supported by the language
supported = list(header_suffixes) + list(obj_suffixes)
for i in self.languages:
supported += list(lang_suffixes[i])
supported = ['.{}'.format(x) for x in supported]
self.sources = [x for x in self.sources if any([x.endswith(y) for y in supported])]
self.generated = [x for x in self.generated if any([x.endswith(y) for y in supported])]
# Make paths relative
def rel_path(x: str, is_header: bool, is_generated: bool) -> T.Optional[str]:
if not os.path.isabs(x):
x = os.path.normpath(os.path.join(self.src_dir, x))
if not os.path.exists(x) and not any([x.endswith(y) for y in obj_suffixes]) and not is_generated:
mlog.warning('CMake: path', mlog.bold(x), 'does not exist. Ignoring. This can lead to build errors')
return None
if os.path.isabs(x) and os.path.commonpath([x, self.env.get_build_dir()]) == self.env.get_build_dir():
if is_header:
return os.path.relpath(x, os.path.join(self.env.get_build_dir(), subdir))
else:
return os.path.relpath(x, root_src_dir)
if os.path.isabs(x) and os.path.commonpath([x, root_src_dir]) == root_src_dir:
return os.path.relpath(x, root_src_dir)
return x
def custom_target(x: str):
ctgt = output_target_map.generated(x)
if ctgt:
assert(isinstance(ctgt, ConverterCustomTarget))
ref = ctgt.get_ref(x)
assert(isinstance(ref, CustomTargetReference) and ref.valid())
return ref
return x
build_dir_rel = os.path.relpath(self.build_dir, os.path.join(self.env.get_build_dir(), subdir))
self.includes = list(set([rel_path(x, True, False) for x in set(self.includes)] + [build_dir_rel]))
self.sys_includes = list(set([rel_path(x, True, False) for x in set(self.sys_includes)]))
self.sources = [rel_path(x, False, False) for x in self.sources]
self.generated = [rel_path(x, False, True) for x in self.generated]
# Resolve custom targets
self.generated = [custom_target(x) for x in self.generated]
# Remove deleted (None) entries
self.includes = [x for x in self.includes if x is not None]
self.sys_includes = [x for x in self.sys_includes if x is not None]
self.sources = [x for x in self.sources if x is not None]
self.generated = [x for x in self.generated if x is not None]
# Make sure '.' is always in the include directories
if '.' not in self.includes:
self.includes += ['.']
# make install dir relative to the install prefix
if self.install_dir and os.path.isabs(self.install_dir):
if os.path.commonpath([self.install_dir, install_prefix]) == install_prefix:
self.install_dir = os.path.relpath(self.install_dir, install_prefix)
# Remove blacklisted options and libs
def check_flag(flag: str) -> bool:
if flag.lower() in blacklist_link_flags or flag in blacklist_compiler_flags + blacklist_clang_cl_link_flags:
return False
if flag.startswith('/D'):
return False
return True
self.link_libraries = [x for x in self.link_libraries if x.lower() not in blacklist_link_libs]
self.link_flags = [x for x in self.link_flags if check_flag(x)]
# Handle explicit CMake add_dependency() calls
for i in self.depends_raw:
tgt = output_target_map.target(i)
if tgt:
self.depends.append(tgt)
def process_object_libs(self, obj_target_list: T.List['ConverterTarget'], linker_workaround: bool):
# Try to detect the object library(s) from the generated input sources
temp = [x for x in self.generated if isinstance(x, str)]
temp = [os.path.basename(x) for x in temp]
temp = [x for x in temp if any([x.endswith('.' + y) for y in obj_suffixes])]
temp = [os.path.splitext(x)[0] for x in temp]
exts = self._all_source_suffixes()
# Temp now stores the source filenames of the object files
for i in obj_target_list:
source_files = [x for x in i.sources + i.generated if isinstance(x, str)]
source_files = [os.path.basename(x) for x in source_files]
for j in temp:
# On some platforms (specifically looking at you Windows with vs20xy backend) CMake does
# not produce object files with the format `foo.cpp.obj`; instead it skips the language
# suffix and just produces object files like `foo.obj`. Thus we have to do our best to
# undo this step and guess the correct language suffix of the object file. This is done
# by trying all language suffixes meson knows and checking if one of them fits.
candidates = [j] # type: T.List[str]
if not any([j.endswith('.' + x) for x in exts]):
mlog.warning('Object files do not contain source file extensions, thus falling back to guessing them.', once=True)
candidates += ['{}.{}'.format(j, x) for x in exts]
if any([x in source_files for x in candidates]):
if linker_workaround:
self._append_objlib_sources(i)
else:
self.includes += i.includes
self.includes = list(set(self.includes))
self.object_libs += [i]
break
# Filter out object files from the sources
self.generated = [x for x in self.generated if not isinstance(x, str) or not any([x.endswith('.' + y) for y in obj_suffixes])]
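# A worked example of the matching above (hypothetical file names, added for
# clarity): an object file 'foo.obj' produced by the vs backend yields j = 'foo',
# so candidates = ['foo', 'foo.c', 'foo.cpp', ...] (one entry per suffix from
# _all_source_suffixes()); an object library whose sources include 'foo.cpp'
# then matches and is recorded in self.object_libs. A fully suffixed file such
# as 'bar.cpp.obj' yields j = 'bar.cpp', which is matched directly.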
def _append_objlib_sources(self, tgt: 'ConverterTarget') -> None:
self.includes += tgt.includes
self.sources += tgt.sources
self.generated += tgt.generated
self.sources = list(set(self.sources))
self.generated = list(set(self.generated))
self.includes = list(set(self.includes))
# Inherit compiler arguments since they may be required for building
for lang, opts in tgt.compile_opts.items():
if lang not in self.compile_opts:
self.compile_opts[lang] = []
self.compile_opts[lang] += [x for x in opts if x not in self.compile_opts[lang]]
@lru_cache(maxsize=None)
def _all_source_suffixes(self) -> T.List[str]:
suffixes = [] # type: T.List[str]
for exts in lang_suffixes.values():
suffixes += [x for x in exts]
return suffixes
def process_inter_target_dependencies(self):
# Move the dependencies from all transfer_dependencies_from to the target
to_process = list(self.depends)
processed = []
new_deps = []
for i in to_process:
processed += [i]
if isinstance(i, ConverterTarget) and i.meson_func() in transfer_dependencies_from:
to_process += [x for x in i.depends if x not in processed]
else:
new_deps += [i]
self.depends = list(set(new_deps))
def cleanup_dependencies(self):
# Clear the dependencies from targets that were moved from
if self.meson_func() in transfer_dependencies_from:
self.depends = []
def meson_func(self) -> str:
return target_type_map.get(self.type.upper())
def log(self) -> None:
mlog.log('Target', mlog.bold(self.name), '({})'.format(self.cmake_name))
mlog.log(' -- artifacts: ', mlog.bold(str(self.artifacts)))
mlog.log(' -- full_name: ', mlog.bold(self.full_name))
mlog.log(' -- type: ', mlog.bold(self.type))
mlog.log(' -- install: ', mlog.bold('true' if self.install else 'false'))
mlog.log(' -- install_dir: ', mlog.bold(self.install_dir))
mlog.log(' -- link_libraries: ', mlog.bold(str(self.link_libraries)))
mlog.log(' -- link_with: ', mlog.bold(str(self.link_with)))
mlog.log(' -- object_libs: ', mlog.bold(str(self.object_libs)))
mlog.log(' -- link_flags: ', mlog.bold(str(self.link_flags)))
mlog.log(' -- languages: ', mlog.bold(str(self.languages)))
mlog.log(' -- includes: ', mlog.bold(str(self.includes)))
mlog.log(' -- sys_includes: ', mlog.bold(str(self.sys_includes)))
mlog.log(' -- sources: ', mlog.bold(str(self.sources)))
mlog.log(' -- generated: ', mlog.bold(str(self.generated)))
mlog.log(' -- pie: ', mlog.bold('true' if self.pie else 'false'))
mlog.log(' -- override_opts: ', mlog.bold(str(self.override_options)))
mlog.log(' -- depends: ', mlog.bold(str(self.depends)))
mlog.log(' -- options:')
for key, val in self.compile_opts.items():
mlog.log(' -', key, '=', mlog.bold(str(val)))
class CustomTargetReference:
def __init__(self, ctgt: 'ConverterCustomTarget', index: int):
self.ctgt = ctgt # type: ConverterCustomTarget
self.index = index # type: int
def __repr__(self) -> str:
if self.valid():
return '<{}: {} [{}]>'.format(self.__class__.__name__, self.ctgt.name, self.ctgt.outputs[self.index])
else:
return '<{}: INVALID REFERENCE>'.format(self.__class__.__name__)
def valid(self) -> bool:
return self.ctgt is not None and self.index >= 0
def filename(self) -> str:
return self.ctgt.outputs[self.index]
class ConverterCustomTarget:
tgt_counter = 0 # type: int
out_counter = 0 # type: int
def __init__(self, target: CMakeGeneratorTarget):
self.name = target.name
if not self.name:
self.name = 'custom_tgt_{}'.format(ConverterCustomTarget.tgt_counter)
ConverterCustomTarget.tgt_counter += 1
self.cmake_name = str(self.name)
self.original_outputs = list(target.outputs)
self.outputs = [os.path.basename(x) for x in self.original_outputs]
self.conflict_map = {}
self.command = target.command
self.working_dir = target.working_dir
self.depends_raw = target.depends
self.inputs = []
self.depends = []
# Convert the target name to a valid meson target name
self.name = self.name.replace('-', '_')
self.name = generated_target_name_prefix + self.name
def __repr__(self) -> str:
return '<{}: {} {}>'.format(self.__class__.__name__, self.name, self.outputs)
def postprocess(self, output_target_map: OutputTargetMap, root_src_dir: str, subdir: str, build_dir: str, all_outputs: T.List[str]) -> None:
# Default the working directory to the CMake build dir. This
# is not 100% correct, since it should be the value of
# ${CMAKE_CURRENT_BINARY_DIR} when add_custom_command is
# called. However, keeping track of this variable is not
# trivial and the current solution should work in most cases.
if not self.working_dir:
self.working_dir = build_dir
# relative paths in the working directory are always relative
# to ${CMAKE_CURRENT_BINARY_DIR} (see note above)
if not os.path.isabs(self.working_dir):
self.working_dir = os.path.normpath(os.path.join(build_dir, self.working_dir))
# Modify the original outputs if they are relative. Again,
# relative paths are relative to ${CMAKE_CURRENT_BINARY_DIR}
# and the first disclaimer is still in effect
def ensure_absolute(x: str):
if os.path.isabs(x):
return x
else:
return os.path.normpath(os.path.join(build_dir, x))
self.original_outputs = [ensure_absolute(x) for x in self.original_outputs]
# Ensure that there is no duplicate output in the project so
# that meson can handle cases where the same filename is
# generated in multiple directories
temp_outputs = [] # type: T.List[str]
for i in self.outputs:
if i in all_outputs:
old = str(i)
i = 'c{}_{}'.format(ConverterCustomTarget.out_counter, i)
ConverterCustomTarget.out_counter += 1
self.conflict_map[old] = i
all_outputs += [i]
temp_outputs += [i]
self.outputs = temp_outputs
# Check if the command is a build target
commands = []
for i in self.command:
assert(isinstance(i, list))
cmd = []
for j in i:
if not j:
continue
target = output_target_map.target(j)
cmd += [target] if target else [j]
commands += [cmd]
self.command = commands
# If the custom target does not declare any output, create a dummy
# one that can be used as a dependency.
if not self.outputs:
self.outputs = [self.name + '.h']
# Check dependencies and input files
for i in self.depends_raw:
if not i:
continue
art = output_target_map.artifact(i)
tgt = output_target_map.target(i)
gen = output_target_map.generated(i)
if art:
self.depends += [art]
elif tgt:
self.depends += [tgt]
elif gen:
self.inputs += [gen.get_ref(i)]
elif not os.path.isabs(i) and os.path.exists(os.path.join(root_src_dir, i)):
self.inputs += [i]
elif os.path.isabs(i) and os.path.exists(i) and os.path.commonpath([i, root_src_dir]) == root_src_dir:
self.inputs += [os.path.relpath(i, root_src_dir)]
def process_inter_target_dependencies(self):
# Move the dependencies from all transfer_dependencies_from to the target
to_process = list(self.depends)
processed = []
new_deps = []
for i in to_process:
processed += [i]
if isinstance(i, ConverterTarget) and i.meson_func() in transfer_dependencies_from:
to_process += [x for x in i.depends if x not in processed]
else:
new_deps += [i]
self.depends = list(set(new_deps))
def get_ref(self, fname: str) -> T.Optional[CustomTargetReference]:
fname = os.path.basename(fname)
try:
if fname in self.conflict_map:
fname = self.conflict_map[fname]
idx = self.outputs.index(fname)
return CustomTargetReference(self, idx)
except ValueError:
return None
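# A hypothetical example of the conflict handling (added for clarity): if two
# custom targets both produce 'out.c', postprocess() renames the second one's
# output to 'c0_out.c' and records {'out.c': 'c0_out.c'} in conflict_map, so
# get_ref('some/dir/out.c') on that target still resolves to the index of the
# renamed output 'c0_out.c'.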
def log(self) -> None:
mlog.log('Custom Target', mlog.bold(self.name))
mlog.log(' -- command: ', mlog.bold(str(self.command)))
mlog.log(' -- outputs: ', mlog.bold(str(self.outputs)))
mlog.log(' -- conflict_map: ', mlog.bold(str(self.conflict_map)))
mlog.log(' -- working_dir: ', mlog.bold(str(self.working_dir)))
mlog.log(' -- depends_raw: ', mlog.bold(str(self.depends_raw)))
mlog.log(' -- inputs: ', mlog.bold(str(self.inputs)))
mlog.log(' -- depends: ', mlog.bold(str(self.depends)))
class CMakeAPI(Enum):
SERVER = 1
FILE = 2
class CMakeInterpreter:
def __init__(self, build: 'Build', subdir: str, src_dir: str, install_prefix: str, env: Environment, backend: 'Backend'):
assert(hasattr(backend, 'name'))
self.build = build
self.subdir = subdir
self.src_dir = src_dir
self.build_dir_rel = os.path.join(subdir, '__CMake_build')
self.build_dir = os.path.join(env.get_build_dir(), self.build_dir_rel)
self.install_prefix = install_prefix
self.env = env
self.backend_name = backend.name
self.linkers = set() # type: T.Set[str]
self.cmake_api = CMakeAPI.SERVER
self.client = CMakeClient(self.env)
self.fileapi = CMakeFileAPI(self.build_dir)
# Raw CMake results
self.bs_files = []
self.codemodel_configs = None
self.raw_trace = None
# Analysed data
self.project_name = ''
self.languages = []
self.targets = []
self.custom_targets = [] # type: T.List[ConverterCustomTarget]
self.trace = CMakeTraceParser()
self.output_target_map = OutputTargetMap(self.build_dir)
# Generated meson data
self.generated_targets = {}
self.internal_name_map = {}
def configure(self, extra_cmake_options: T.List[str]) -> None:
for_machine = MachineChoice.HOST # TODO make parameter
# Find CMake
cmake_exe = CMakeExecutor(self.env, '>=3.7', for_machine)
if not cmake_exe.found():
raise CMakeException('Unable to find CMake')
generator = backend_generator_map[self.backend_name]
cmake_args = cmake_exe.get_command()
trace_args = ['--trace', '--trace-expand', '--no-warn-unused-cli']
cmcmp_args = ['-DCMAKE_POLICY_WARNING_{}=OFF'.format(x) for x in disable_policy_warnings]
if version_compare(cmake_exe.version(), '>=3.14'):
self.cmake_api = CMakeAPI.FILE
self.fileapi.setup_request()
# Map meson compiler to CMake variables
for lang, comp in self.env.coredata.compilers[for_machine].items():
if lang not in language_map:
continue
self.linkers.add(comp.get_linker_id())
cmake_lang = language_map[lang]
exelist = comp.get_exelist()
if len(exelist) == 1:
cmake_args += ['-DCMAKE_{}_COMPILER={}'.format(cmake_lang, exelist[0])]
elif len(exelist) == 2:
cmake_args += ['-DCMAKE_{}_COMPILER_LAUNCHER={}'.format(cmake_lang, exelist[0]),
'-DCMAKE_{}_COMPILER={}'.format(cmake_lang, exelist[1])]
if hasattr(comp, 'get_linker_exelist') and comp.get_id() == 'clang-cl':
cmake_args += ['-DCMAKE_LINKER={}'.format(comp.get_linker_exelist()[0])]
cmake_args += ['-G', generator]
cmake_args += ['-DCMAKE_INSTALL_PREFIX={}'.format(self.install_prefix)]
cmake_args += extra_cmake_options
# Run CMake
mlog.log()
with mlog.nested():
mlog.log('Configuring the build directory with', mlog.bold('CMake'), 'version', mlog.cyan(cmake_exe.version()))
mlog.log(mlog.bold('Running:'), ' '.join(cmake_args))
mlog.log(mlog.bold(' - build directory: '), self.build_dir)
mlog.log(mlog.bold(' - source directory: '), self.src_dir)
mlog.log(mlog.bold(' - trace args: '), ' '.join(trace_args))
mlog.log(mlog.bold(' - disabled policy warnings:'), '[{}]'.format(', '.join(disable_policy_warnings)))
mlog.log()
os.makedirs(self.build_dir, exist_ok=True)
os_env = os.environ.copy()
os_env['LC_ALL'] = 'C'
final_command = cmake_args + trace_args + cmcmp_args + [self.src_dir]
proc = Popen(final_command, stdout=PIPE, stderr=PIPE, cwd=self.build_dir, env=os_env)
def print_stdout():
while True:
line = proc.stdout.readline()
if not line:
break
mlog.log(line.decode('utf-8').strip('\n'))
proc.stdout.close()
t = Thread(target=print_stdout)
t.start()
# Read stderr line by line and log non-trace lines
self.raw_trace = ''
tline_start_reg = re.compile(r'^\s*(.*\.(cmake|txt))\(([0-9]+)\):\s*(\w+)\(.*$')
inside_multiline_trace = False
while True:
line = proc.stderr.readline()
if not line:
break
line = line.decode('utf-8')
if tline_start_reg.match(line):
self.raw_trace += line
inside_multiline_trace = not line.endswith(' )\n')
elif inside_multiline_trace:
self.raw_trace += line
else:
mlog.warning(line.strip('\n'))
proc.stderr.close()
proc.wait()
t.join()
mlog.log()
h = mlog.green('SUCCEEDED') if proc.returncode == 0 else mlog.red('FAILED')
mlog.log('CMake configuration:', h)
if proc.returncode != 0:
raise CMakeException('Failed to configure the CMake subproject')
def initialise(self, extra_cmake_options: T.List[str]) -> None:
# Run configure the old way because doing it
# with the server doesn't work for some reason
# Additionally, the File API requires a configure anyway
self.configure(extra_cmake_options)
# Continue with the file API if supported
if self.cmake_api is CMakeAPI.FILE:
# Parse the result
self.fileapi.load_reply()
# Load the buildsystem file list
cmake_files = self.fileapi.get_cmake_sources()
self.bs_files = [x.file for x in cmake_files if not x.is_cmake and not x.is_temp]
self.bs_files = [os.path.relpath(x, self.env.get_source_dir()) for x in self.bs_files]
self.bs_files = list(set(self.bs_files))
# Load the codemodel configurations
self.codemodel_configs = self.fileapi.get_cmake_configurations()
return
with self.client.connect():
generator = backend_generator_map[self.backend_name]
self.client.do_handshake(self.src_dir, self.build_dir, generator, 1)
# Do a second configure to initialise the server
self.client.query_checked(RequestConfigure(), 'CMake server configure')
# Generate the build system files
self.client.query_checked(RequestCompute(), 'Generating build system files')
# Get CMake build system files
bs_reply = self.client.query_checked(RequestCMakeInputs(), 'Querying build system files')
# Now get the CMake code model
cm_reply = self.client.query_checked(RequestCodeModel(), 'Querying the CMake code model')
src_dir = bs_reply.src_dir
self.bs_files = [x.file for x in bs_reply.build_files if not x.is_cmake and not x.is_temp]
self.bs_files = [os.path.relpath(os.path.join(src_dir, x), self.env.get_source_dir()) for x in self.bs_files]
self.bs_files = list(set(self.bs_files))
self.codemodel_configs = cm_reply.configs
def analyse(self) -> None:
if self.codemodel_configs is None:
raise CMakeException('CMakeInterpreter was not initialized')
# Clear analyser data
self.project_name = ''
self.languages = []
self.targets = []
self.custom_targets = []
self.trace = CMakeTraceParser(permissive=True)
# Parse the trace
self.trace.parse(self.raw_trace)
# Find all targets
added_target_names = [] # type: T.List[str]
for i in self.codemodel_configs:
for j in i.projects:
if not self.project_name:
self.project_name = j.name
for k in j.targets:
# Avoid duplicate targets from different configurations and known
# dummy CMake internal target types
if k.type not in skip_targets and k.name not in added_target_names:
added_target_names += [k.name]
self.targets += [ConverterTarget(k, self.env)]
# Add interface targets from trace, if not already present.
# This step is required because interface targets were removed from
# the CMake file API output.
api_target_name_list = [x.name for x in self.targets]
for i in self.trace.targets.values():
if i.type != 'INTERFACE' or i.name in api_target_name_list or i.imported:
continue
dummy = CMakeTarget({
'name': i.name,
'type': 'INTERFACE_LIBRARY',
'sourceDirectory': self.src_dir,
'buildDirectory': self.build_dir,
})
self.targets += [ConverterTarget(dummy, self.env)]
for i in self.trace.custom_targets:
self.custom_targets += [ConverterCustomTarget(i)]
# generate the output_target_map
for i in [*self.targets, *self.custom_targets]:
self.output_target_map.add(i)
# First pass: Basic target cleanup
object_libs = []
custom_target_outputs = [] # type: T.List[str]
for i in self.custom_targets:
i.postprocess(self.output_target_map, self.src_dir, self.subdir, self.build_dir, custom_target_outputs)
for i in self.targets:
i.postprocess(self.output_target_map, self.src_dir, self.subdir, self.install_prefix, self.trace)
if i.type == 'OBJECT_LIBRARY':
object_libs += [i]
self.languages += [x for x in i.languages if x not in self.languages]
# Second pass: Detect object library dependencies
for i in self.targets:
i.process_object_libs(object_libs, self._object_lib_workaround())
# Third pass: Reassign dependencies to avoid some loops
for i in self.targets:
i.process_inter_target_dependencies()
for i in self.custom_targets:
i.process_inter_target_dependencies()
# Fourth pass: Remove reassigned dependencies
for i in self.targets:
i.cleanup_dependencies()
mlog.log('CMake project', mlog.bold(self.project_name), 'has', mlog.bold(str(len(self.targets) + len(self.custom_targets))), 'build targets.')
def pretend_to_be_meson(self) -> CodeBlockNode:
if not self.project_name:
raise CMakeException('CMakeInterpreter was not analysed')
def token(tid: str = 'string', val='') -> Token:
return Token(tid, self.subdir, 0, 0, 0, None, val)
def string(value: str) -> StringNode:
return StringNode(token(val=value))
def id_node(value: str) -> IdNode:
return IdNode(token(val=value))
def number(value: int) -> NumberNode:
return NumberNode(token(val=value))
def nodeify(value):
if isinstance(value, str):
return string(value)
elif isinstance(value, bool):
return BooleanNode(token(), value)
elif isinstance(value, int):
return number(value)
elif isinstance(value, list):
return array(value)
return value
def indexed(node: BaseNode, index: int) -> IndexNode:
return IndexNode(node, nodeify(index))
def array(elements) -> ArrayNode:
args = ArgumentNode(token())
if not isinstance(elements, list):
elements = [args]
args.arguments += [nodeify(x) for x in elements if x is not None]
return ArrayNode(args, 0, 0, 0, 0)
def function(name: str, args=None, kwargs=None) -> FunctionNode:
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
args_n = ArgumentNode(token())
if not isinstance(args, list):
args = [args]
args_n.arguments = [nodeify(x) for x in args if x is not None]
args_n.kwargs = {k: nodeify(v) for k, v in kwargs.items() if v is not None}
func_n = FunctionNode(self.subdir, 0, 0, 0, 0, name, args_n)
return func_n
def method(obj: BaseNode, name: str, args=None, kwargs=None) -> MethodNode:
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
args_n = ArgumentNode(token())
if not isinstance(args, list):
args = [args]
args_n.arguments = [nodeify(x) for x in args if x is not None]
args_n.kwargs = {k: nodeify(v) for k, v in kwargs.items() if v is not None}
return MethodNode(self.subdir, 0, 0, obj, name, args_n)
def assign(var_name: str, value: BaseNode) -> AssignmentNode:
return AssignmentNode(self.subdir, 0, 0, var_name, value)
# Generate the root code block and the project function call
root_cb = CodeBlockNode(token())
root_cb.lines += [function('project', [self.project_name] + self.languages)]
# Add the run script for custom commands
run_script = '{}/data/run_ctgt.py'.format(os.path.dirname(os.path.realpath(__file__)))
run_script_var = 'ctgt_run_script'
root_cb.lines += [assign(run_script_var, function('find_program', [[run_script]], {'required': True}))]
# Add the targets
processing = []
processed = {}
name_map = {}
def extract_tgt(tgt: T.Union[ConverterTarget, ConverterCustomTarget, CustomTargetReference]) -> IdNode:
tgt_name = None
if isinstance(tgt, (ConverterTarget, ConverterCustomTarget)):
tgt_name = tgt.name
elif isinstance(tgt, CustomTargetReference):
tgt_name = tgt.ctgt.name
assert(tgt_name is not None and tgt_name in processed)
res_var = processed[tgt_name]['tgt']
return id_node(res_var) if res_var else None
def detect_cycle(tgt: T.Union[ConverterTarget, ConverterCustomTarget]) -> None:
if tgt.name in processing:
raise CMakeException('Cycle in CMake inputs/dependencies detected')
processing.append(tgt.name)
def resolve_ctgt_ref(ref: CustomTargetReference) -> BaseNode:
tgt_var = extract_tgt(ref)
if len(ref.ctgt.outputs) == 1:
return tgt_var
else:
return indexed(tgt_var, ref.index)
def process_target(tgt: ConverterTarget):
detect_cycle(tgt)
# First handle inter target dependencies
link_with = []
objec_libs = [] # type: T.List[IdNode]
sources = []
generated = []
generated_filenames = []
custom_targets = []
dependencies = []
for i in tgt.link_with:
assert(isinstance(i, ConverterTarget))
if i.name not in processed:
process_target(i)
link_with += [extract_tgt(i)]
for i in tgt.object_libs:
assert(isinstance(i, ConverterTarget))
if i.name not in processed:
process_target(i)
objec_libs += [extract_tgt(i)]
for i in tgt.depends:
if not isinstance(i, ConverterCustomTarget):
continue
if i.name not in processed:
process_custom_target(i)
dependencies += [extract_tgt(i)]
# Generate the source list and handle generated sources
for i in tgt.sources + tgt.generated:
if isinstance(i, CustomTargetReference):
if i.ctgt.name not in processed:
process_custom_target(i.ctgt)
generated += [resolve_ctgt_ref(i)]
generated_filenames += [i.filename()]
if i.ctgt not in custom_targets:
custom_targets += [i.ctgt]
else:
sources += [i]
# Add all header files from all used custom targets. This
# ensures that all custom targets are built before any
# sources of the current target are compiled and thus all
# header files are present. This step is necessary because
# CMake always ensures that a custom target is executed
# before another target if at least one output is used.
for i in custom_targets:
for j in i.outputs:
if not is_header(j) or j in generated_filenames:
continue
generated += [resolve_ctgt_ref(i.get_ref(j))]
generated_filenames += [j]
# Determine the meson function to use for the build target
tgt_func = tgt.meson_func()
if not tgt_func:
raise CMakeException('Unknown target type "{}"'.format(tgt.type))
# Determine the variable names
inc_var = '{}_inc'.format(tgt.name)
dir_var = '{}_dir'.format(tgt.name)
sys_var = '{}_sys'.format(tgt.name)
src_var = '{}_src'.format(tgt.name)
dep_var = '{}_dep'.format(tgt.name)
tgt_var = tgt.name
# Generate target kwargs
tgt_kwargs = {
'build_by_default': False,
'link_args': tgt.link_flags + tgt.link_libraries,
'link_with': link_with,
'include_directories': id_node(inc_var),
'install': tgt.install,
'install_dir': tgt.install_dir,
'override_options': tgt.override_options,
'objects': [method(x, 'extract_all_objects') for x in objec_libs],
}
# Handle compiler args
for key, val in tgt.compile_opts.items():
tgt_kwargs['{}_args'.format(key)] = val
# Handle -fPIC, etc.
if tgt_func == 'executable':
tgt_kwargs['pie'] = tgt.pie
elif tgt_func == 'static_library':
tgt_kwargs['pic'] = tgt.pie
# declare_dependency kwargs
dep_kwargs = {
'link_args': tgt.link_flags + tgt.link_libraries,
'link_with': id_node(tgt_var),
'compile_args': tgt.public_compile_opts,
'include_directories': id_node(inc_var),
}
if dependencies:
generated += dependencies
# Generate the function nodes
dir_node = assign(dir_var, function('include_directories', tgt.includes))
sys_node = assign(sys_var, function('include_directories', tgt.sys_includes, {'is_system': True}))
inc_node = assign(inc_var, array([id_node(dir_var), id_node(sys_var)]))
node_list = [dir_node, sys_node, inc_node]
if tgt_func == 'header_only':
del dep_kwargs['link_with']
dep_node = assign(dep_var, function('declare_dependency', kwargs=dep_kwargs))
node_list += [dep_node]
src_var = None
tgt_var = None
else:
src_node = assign(src_var, function('files', sources))
tgt_node = assign(tgt_var, function(tgt_func, [tgt_var, [id_node(src_var)] + generated], tgt_kwargs))
node_list += [src_node, tgt_node]
if tgt_func in ['static_library', 'shared_library']:
dep_node = assign(dep_var, function('declare_dependency', kwargs=dep_kwargs))
node_list += [dep_node]
else:
dep_var = None
# Add the nodes to the ast
root_cb.lines += node_list
processed[tgt.name] = {'inc': inc_var, 'src': src_var, 'dep': dep_var, 'tgt': tgt_var, 'func': tgt_func}
name_map[tgt.cmake_name] = tgt.name
def process_custom_target(tgt: ConverterCustomTarget) -> None:
# CMake allows specifying multiple commands in a custom target.
# To map this to meson, a helper script is used to execute all
# commands in order. This additionally allows setting the working
# directory.
detect_cycle(tgt)
tgt_var = tgt.name # type: str
def resolve_source(x: T.Any) -> T.Any:
if isinstance(x, ConverterTarget):
if x.name not in processed:
process_target(x)
return extract_tgt(x)
if isinstance(x, ConverterCustomTarget):
if x.name not in processed:
process_custom_target(x)
return extract_tgt(x)
elif isinstance(x, CustomTargetReference):
if x.ctgt.name not in processed:
process_custom_target(x.ctgt)
return resolve_ctgt_ref(x)
else:
return x
# Generate the command list
command = []
command += [id_node(run_script_var)]
command += ['-o', '@OUTPUT@']
if tgt.original_outputs:
command += ['-O'] + tgt.original_outputs
command += ['-d', tgt.working_dir]
# Generate the commands. Subcommands are separated by ';;;'
for cmd in tgt.command:
command += [resolve_source(x) for x in cmd] + [';;;']
tgt_kwargs = {
'input': [resolve_source(x) for x in tgt.inputs],
'output': tgt.outputs,
'command': command,
'depends': [resolve_source(x) for x in tgt.depends],
}
root_cb.lines += [assign(tgt_var, function('custom_target', [tgt.name], tgt_kwargs))]
processed[tgt.name] = {'inc': None, 'src': None, 'dep': None, 'tgt': tgt_var, 'func': 'custom_target'}
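# A rough sketch of the meson code generated above for a hypothetical custom
# target named 'gen' with a single command and no declared outputs (all names
# below are illustrative only):
#
#   gen = custom_target('gen',
#     input: [],
#     output: ['gen.h'],
#     command: [ctgt_run_script, '-o', '@OUTPUT@', '-d', '<working_dir>',
#               'python3', 'generate.py', ';;;'],
#     depends: [])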
# Now generate the target function calls
for i in self.custom_targets:
if i.name not in processed:
process_custom_target(i)
for i in self.targets:
if i.name not in processed:
process_target(i)
self.generated_targets = processed
self.internal_name_map = name_map
return root_cb
def target_info(self, target: str) -> T.Optional[T.Dict[str, str]]:
# Try resolving the target name
# start by checking if there is a 100% match (excluding the name prefix)
prx_tgt = generated_target_name_prefix + target
if prx_tgt in self.generated_targets:
return self.generated_targets[prx_tgt]
# check if there exists a name mapping
if target in self.internal_name_map:
target = self.internal_name_map[target]
assert(target in self.generated_targets)
return self.generated_targets[target]
return None
def target_list(self) -> T.List[str]:
prx_str = generated_target_name_prefix
prx_len = len(prx_str)
res = [x for x in self.generated_targets.keys()]
res = [x[prx_len:] if x.startswith(prx_str) else x for x in res]
return res
def _object_lib_workaround(self) -> bool:
return 'link' in self.linkers and self.backend_name.startswith('vs')
|
testutils.py
|
from __future__ import print_function
import os
import sys
from types import TracebackType
import isodate
import datetime
import random
from contextlib import AbstractContextManager, contextmanager
from typing import (
Callable,
Iterable,
List,
Optional,
TYPE_CHECKING,
Type,
Iterator,
Set,
Tuple,
Dict,
Any,
TypeVar,
cast,
NamedTuple,
)
from urllib.parse import ParseResult, unquote, urlparse, parse_qs
from traceback import print_exc
from threading import Thread
from http.server import (
BaseHTTPRequestHandler,
HTTPServer,
SimpleHTTPRequestHandler,
)
import email.message
import unittest
from rdflib import BNode, Graph
from rdflib.term import Node
from unittest.mock import MagicMock, Mock
from urllib.error import HTTPError
from urllib.request import urlopen
from pathlib import PurePath, PureWindowsPath
from nturl2path import url2pathname as nt_url2pathname
if TYPE_CHECKING:
import typing_extensions as te
def get_random_ip(parts: Optional[List[str]] = None) -> str:
if parts is None:
parts = ["127"]
for _ in range(4 - len(parts)):
parts.append(f"{random.randint(0, 255)}")
return ".".join(parts)
@contextmanager
def ctx_http_server(
handler: Type[BaseHTTPRequestHandler], host: str = "127.0.0.1"
) -> Iterator[HTTPServer]:
server = HTTPServer((host, 0), handler)
server_thread = Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
yield server
server.shutdown()
server.socket.close()
server_thread.join()
class GraphHelper:
@classmethod
def triple_set(cls, graph: Graph) -> Set[Tuple[Node, Node, Node]]:
return set(graph.triples((None, None, None)))
@classmethod
def triple_sets(
cls, graphs: Iterable[Graph]
) -> List[Set[Tuple[Node, Node, Node]]]:
result: List[Set[Tuple[Node, Node, Node]]] = []
for graph in graphs:
result.append(cls.triple_set(graph))
return result
@classmethod
def equals(cls, lhs: Graph, rhs: Graph) -> bool:
return cls.triple_set(lhs) == cls.triple_set(rhs)
GenericT = TypeVar("GenericT", bound=Any)
def make_spypair(method: GenericT) -> Tuple[GenericT, Mock]:
m = MagicMock()
def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
m(*args, **kwargs)
return method(self, *args, **kwargs)
setattr(wrapper, "mock", m)
return cast(GenericT, wrapper), m
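# A minimal usage sketch (the Greeter class below is hypothetical and only
# illustrates the pattern): make_spypair wraps an unbound method so that every
# call is also recorded on a Mock while the original behaviour is preserved.
#
#   class Greeter:
#       def _greet(self, name: str) -> str:
#           return f"hello {name}"
#       greet, greet_mock = make_spypair(_greet)
#
#   g = Greeter()
#   assert g.greet("bob") == "hello bob"
#   Greeter.greet_mock.assert_called_once_with("bob")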
HeadersT = Dict[str, List[str]]
PathQueryT = Dict[str, List[str]]
class MockHTTPRequests(NamedTuple):
method: str
path: str
parsed_path: ParseResult
path_query: PathQueryT
headers: email.message.Message
class MockHTTPResponse(NamedTuple):
status_code: int
reason_phrase: str
body: bytes
headers: HeadersT
class SimpleHTTPMock:
"""
SimpleHTTPMock allows testing of code that relies on an HTTP server.
NOTE: Currently only the GET and POST methods are supported.
Objects of this class have a list of responses for each method (GET, POST, etc.)
and return these responses for those methods in sequence.
All requests received are appended to a method-specific list.
Example usage:
>>> httpmock = SimpleHTTPMock()
>>> with ctx_http_server(httpmock.Handler) as server:
... url = "http://{}:{}".format(*server.server_address)
... # add a response the server should give:
... httpmock.do_get_responses.append(
... MockHTTPResponse(404, "Not Found", b"gone away", {})
... )
...
... # send a request to get the first response
... http_error: Optional[HTTPError] = None
... try:
... urlopen(f"{url}/bad/path")
... except HTTPError as caught:
... http_error = caught
...
... assert http_error is not None
... assert http_error.code == 404
...
... # get and validate request that the mock received
... req = httpmock.do_get_requests.pop(0)
... assert req.path == "/bad/path"
"""
# TODO: add additional methods (PUT, PATCH, ...) similar to GET and POST
def __init__(self):
self.do_get_requests: List[MockHTTPRequests] = []
self.do_get_responses: List[MockHTTPResponse] = []
self.do_post_requests: List[MockHTTPRequests] = []
self.do_post_responses: List[MockHTTPResponse] = []
_http_mock = self
class Handler(SimpleHTTPRequestHandler):
http_mock = _http_mock
def _do_GET(self):
parsed_path = urlparse(self.path)
path_query = parse_qs(parsed_path.query)
request = MockHTTPRequests(
"GET", self.path, parsed_path, path_query, self.headers
)
self.http_mock.do_get_requests.append(request)
response = self.http_mock.do_get_responses.pop(0)
self.send_response(
response.status_code, response.reason_phrase
)
for header, values in response.headers.items():
for value in values:
self.send_header(header, value)
self.end_headers()
self.wfile.write(response.body)
self.wfile.flush()
return
(do_GET, do_GET_mock) = make_spypair(_do_GET)
def _do_POST(self):
parsed_path = urlparse(self.path)
path_query = parse_qs(parsed_path.query)
request = MockHTTPRequests(
"POST", self.path, parsed_path, path_query, self.headers
)
self.http_mock.do_post_requests.append(request)
response = self.http_mock.do_post_responses.pop(0)
self.send_response(
response.status_code, response.reason_phrase
)
for header, values in response.headers.items():
for value in values:
self.send_header(header, value)
self.end_headers()
self.wfile.write(response.body)
self.wfile.flush()
return
(do_POST, do_POST_mock) = make_spypair(_do_POST)
def log_message(self, format: str, *args: Any) -> None:
pass
self.Handler = Handler
self.do_get_mock = Handler.do_GET_mock
self.do_post_mock = Handler.do_POST_mock
def reset(self):
self.do_get_requests.clear()
self.do_get_responses.clear()
self.do_get_mock.reset_mock()
self.do_post_requests.clear()
self.do_post_responses.clear()
self.do_post_mock.reset_mock()
@property
def call_count(self):
return self.do_post_mock.call_count + self.do_get_mock.call_count
class SimpleHTTPMockTests(unittest.TestCase):
def test_example(self) -> None:
httpmock = SimpleHTTPMock()
with ctx_http_server(httpmock.Handler) as server:
url = "http://{}:{}".format(*server.server_address)
# add two responses the server should give:
httpmock.do_get_responses.append(
MockHTTPResponse(404, "Not Found", b"gone away", {})
)
httpmock.do_get_responses.append(
MockHTTPResponse(200, "OK", b"here it is", {})
)
# send a request to get the first response
with self.assertRaises(HTTPError) as raised:
urlopen(f"{url}/bad/path")
assert raised.exception.code == 404
# get and validate request that the mock received
req = httpmock.do_get_requests.pop(0)
self.assertEqual(req.path, "/bad/path")
# send a request to get the second response
resp = urlopen(f"{url}/")
self.assertEqual(resp.status, 200)
self.assertEqual(resp.read(), b"here it is")
httpmock.do_get_responses.append(
MockHTTPResponse(404, "Not Found", b"gone away", {})
)
httpmock.do_get_responses.append(
MockHTTPResponse(200, "OK", b"here it is", {})
)
class ServedSimpleHTTPMock(SimpleHTTPMock, AbstractContextManager):
"""
ServedSimpleHTTPMock is a SimpleHTTPMock with an HTTP server.
Example usage:
>>> with ServedSimpleHTTPMock() as httpmock:
... # add a response the server should give:
... httpmock.do_get_responses.append(
... MockHTTPResponse(404, "Not Found", b"gone away", {})
... )
...
... # send a request to get the first response
... http_error: Optional[HTTPError] = None
... try:
... urlopen(f"{httpmock.url}/bad/path")
... except HTTPError as caught:
... http_error = caught
...
... assert http_error is not None
... assert http_error.code == 404
...
... # get and validate request that the mock received
... req = httpmock.do_get_requests.pop(0)
... assert req.path == "/bad/path"
"""
def __init__(self, host: str = "127.0.0.1"):
super().__init__()
self.server = HTTPServer((host, 0), self.Handler)
self.server_thread = Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def stop(self) -> None:
self.server.shutdown()
self.server.socket.close()
self.server_thread.join()
@property
def address_string(self) -> str:
(host, port) = self.server.server_address
return f"{host}:{port}"
@property
def url(self) -> str:
return f"http://{self.address_string}"
def __enter__(self) -> "ServedSimpleHTTPMock":
return self
def __exit__(
self,
__exc_type: Optional[Type[BaseException]],
__exc_value: Optional[BaseException],
__traceback: Optional[TracebackType],
) -> "te.Literal[False]":
self.stop()
return False
class ServedSimpleHTTPMockTests(unittest.TestCase):
def test_example(self) -> None:
with ServedSimpleHTTPMock() as httpmock:
# add two responses the server should give:
httpmock.do_get_responses.append(
MockHTTPResponse(404, "Not Found", b"gone away", {})
)
httpmock.do_get_responses.append(
MockHTTPResponse(200, "OK", b"here it is", {})
)
# send a request to get the first response
with self.assertRaises(HTTPError) as raised:
urlopen(f"{httpmock.url}/bad/path")
assert raised.exception.code == 404
# get and validate request that the mock received
req = httpmock.do_get_requests.pop(0)
self.assertEqual(req.path, "/bad/path")
# send a request to get the second response
resp = urlopen(f"{httpmock.url}/")
self.assertEqual(resp.status, 200)
self.assertEqual(resp.read(), b"here it is")
httpmock.do_get_responses.append(
MockHTTPResponse(404, "Not Found", b"gone away", {})
)
httpmock.do_get_responses.append(
MockHTTPResponse(200, "OK", b"here it is", {})
)
def eq_(lhs, rhs, msg=None):
"""
This function mimics the similar function from nose. Ideally nothing
should use it, but there is a lot of code that still does, and it's fairly
simple to just keep this small polyfill here for now.
"""
if msg:
assert lhs == rhs, msg
else:
assert lhs == rhs
PurePathT = TypeVar("PurePathT", bound=PurePath)
def file_uri_to_path(
file_uri: str,
path_class: Type[PurePathT] = PurePath, # type: ignore[assignment]
url2pathname: Optional[Callable[[str], str]] = None,
) -> PurePathT:
"""
This function returns a pathlib.PurePath object for the supplied file URI.
:param str file_uri: The file URI ...
:param class path_class: The type of path in the file_uri. By default it uses
the system-specific pathlib.PurePath; to force a specific type of path,
pass pathlib.PureWindowsPath or pathlib.PurePosixPath.
:returns: the pathlib.PurePath object
:rtype: pathlib.PurePath
"""
is_windows_path = isinstance(path_class(), PureWindowsPath)
file_uri_parsed = urlparse(file_uri)
if url2pathname is None:
if is_windows_path:
url2pathname = nt_url2pathname
else:
url2pathname = unquote
pathname = url2pathname(file_uri_parsed.path)
result = path_class(pathname)
return result
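# Usage sketch (paths are illustrative):
#
#   file_uri_to_path("file:///srv/data/graph.ttl")
#       -> PurePosixPath('/srv/data/graph.ttl') on a POSIX system
#   file_uri_to_path("file:///c:/data/graph.ttl", PureWindowsPath)
#       -> PureWindowsPath('C:/data/graph.ttl')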
|
__init__.py
|
# -*- encoding: utf-8 -*-
"""
@File : __init__.py.py
@Time : 2020/6/28 21:20
@Author : chise
@Email : chise123@live.com
@Software: PyCharm
@info :
"""
import asyncio
import json
from multiprocessing import Process
from typing import List
import aiohttp
from settings import server_host2, server_host, server_id, server_host3
class Sign():
@classmethod
def add_sign(cls, data: dict):
return data
@classmethod
def check_sign(cls, data: dict):
return data
@classmethod
def get_sign(cls, data: dict):
return data
class aioClient(Sign):
"""
One-off (ad-hoc) interaction with the server.
"""
@classmethod
async def send(cls, data: dict) -> dict:
session = aiohttp.ClientSession()
async with session.ws_connect(server_host2) as ws:
data = cls.add_sign(data)
await ws.send_str(json.dumps(data))
ret = await ws.receive()
await session.close()
assert ret, "Unexpected response data"
ret = ret.json()
cls.check_sign(ret)
return ret
processes = {}
from run2 import start
class WebClient(Sign):
"""
Global, long-lived interaction with the server.
"""
instance = None
session = None
tasks: List[dict] = []  # names of the tasks the user is currently running
ws_client = None
def __new__(cls, *args, **kwargs):
if not cls.instance:
cls.instance = super().__new__(cls)  # object.__new__ takes no extra arguments
return cls.instance
@classmethod
def get_all_user(cls, accounts):
"""
Start all users.
:param accounts:
:return:
"""
for account in accounts:
process = Process(target=start, args=(account['id'],))
processes[account['id']] = process
process.start()
@classmethod
def get_user(cls, account):
"""
Start a single user.
:param account:
:return:
"""
process = Process(target=start, args=(account['id'],))
processes[account['id']] = process
process.start()
@classmethod
async def receive(cls):
cls.session = aiohttp.ClientSession()
async with cls.session.ws_connect(server_host3) as ws:
cls.ws_client = ws
await ws.send_str(json.dumps({"message": "start", "server_id": server_id}))
while True:
data = await ws.receive()
data = data.json()
cls.check_sign(data)
if data['message'] == 'get_user':
cls.get_user(data['account_id'])
elif data['message'] == 'get_all_user':
cls.get_all_user(data['accounts'])
@classmethod
async def send(cls, data: dict):
if not cls.ws_client:
await asyncio.sleep(1)
if not cls.ws_client:
raise Exception("websocket客户端启动失败")
return await cls.ws_client.send_str(json.dumps(data))
@classmethod
async def modify_msg(cls, data: dict):
"""
Process the received message.
:param data:
:return:
"""
pass
|
test_server.py
|
# *****************************************
# |docname| - Tests using the web2py server
# *****************************************
# These tests start the web2py server then submit requests to it. All the fixtures are auto-imported by pytest from ``conftest.py``.
#
# .. contents::
#
# Imports
# =======
# These are listed in the order prescribed by `PEP 8
# <http://www.python.org/dev/peps/pep-0008/#imports>`_.
#
# Standard library
# ----------------
from textwrap import dedent
import json
from threading import Thread
import datetime
import re
import sys
import time
# Third-party imports
# -------------------
import pytest
import six
# Local imports
# -------------
from .utils import web2py_controller_import
# Debugging notes
# ===============
# Invoke the debugger.
##import pdb; pdb.set_trace()
# Put this in web2py code, then use the web-based debugger.
##from gluon.debug import dbg; dbg.set_trace()
# Tests
# =====
# Use for easy manual testing of the server, by setting up a user and class automatically. Comment out the line below to enable it.
@pytest.mark.skip(reason="Only needed for manual testing.")
def test_manual(runestone_db_tools, test_user):
# Modify this as desired to create courses, users, etc. for manual testing.
course_1 = runestone_db_tools.create_course()
test_user("bob", "bob", course_1)
# Pause in the debugger until manual testing is done.
import pdb
pdb.set_trace()
def test_killer(test_assignment, test_client, test_user_1, runestone_db_tools):
"""
This test ensures that we have the routing set up for testing properly.
This test will fail if routes.py is set up as follows.
routes_onerror = [
('runestone/static/404', '/runestone/static/fail.html'),
('runestone/500', '/runestone/default/reportabug.html'),
]
for testing purposes we don't want web2py to capture 500 errors.
"""
with pytest.raises(Exception) as excinfo:
test_client.post("admin/killer")
assert test_client.text == ""
print(excinfo.value)
assert "ticket" in str(excinfo.value) or "INTERNAL" in str(excinfo.value)
# Validate the HTML produced by various web2py pages.
# NOTE -- this is the start of a really really long decorator for test_1
@pytest.mark.parametrize(
"url, requires_login, expected_string, expected_errors",
[
# **Admin**
# ----------
# FIXME: Flashed messages don't seem to work.
# ('admin/index', False, 'You must be registered for a course to access this page', 1),
# ('admin/index', True, 'You must be an instructor to access this page', 1),
("admin/doc", True, "Runestone Help and Documentation", 1),
# **Assignments**
# ----------------
("assignments/chooseAssignment", True, "Assignments", 1),
("assignments/doAssignment", True, "Bad Assignment ID", 1),
(
"assignments/practice",
True,
"Practice tool is not set up for this course yet.",
1,
),
("assignments/practiceNotStartedYet", True, "test_course_1", 1),
# **Default**
# ------------
# *User*
#
# The `authentication <http://web2py.com/books/default/chapter/29/09/access-control#Authentication>`_ section gives the URLs exposed by web2py. Check these.
("default/user/login", False, "Login", 1),
("default/user/register", False, "Registration", 1),
("default/user/logout", True, "Logged out", 1),
# One validation error is a result of removing the input field for the e-mail, but web2py still tries to label it, which is an error.
("default/user/profile", True, "Profile", 2),
("default/user/change_password", True, "Change password", 1),
# Runestone doesn't support this.
#'default/user/verify_email', False, 'Verify email', 1),
("default/user/retrieve_username", False, "Retrieve username", 1),
("default/user/request_reset_password", False, "Request reset password", 1),
# This doesn't display a webpage, but instead redirects to courses.
# ('default/user/reset_password, False, 'Reset password', 1),
("default/user/impersonate", True, "Impersonate", 1),
# FIXME: This produces an exception.
#'default/user/groups', True, 'Groups', 1),
("default/user/not_authorized", False, "Not authorized", 1),
# *Other pages*
#
# TODO: What is this for?
# ('default/call', False, 'Not found', 0),
("default/index", True, "Course Selection", 1),
("default/about", False, "About Us", 1),
("default/error", False, "Error: the document does not exist", 1),
("default/ack", False, "Acknowledgements", 1),
# web2py generates invalid labels for the radio buttons in this form.
("default/bio", True, "Tell Us About Yourself", 3),
("default/courses", True, "Course Selection", 1),
("default/remove", True, "Remove a Course", 1),
# Should work in both cases.
("default/reportabug", False, "Report a Bug", 1),
("default/reportabug", True, "Report a Bug", 1),
# ('default/sendreport', True, 'Could not create issue', 1),
("default/terms", False, "Terms and Conditions", 1),
("default/privacy", False, "Runestone Academy Privacy Policy", 1),
("default/donate", False, "Support Runestone Interactive", 1),
# TODO: This doesn't really test much of the body of either of these.
("default/coursechooser", True, "Course Selection", 1),
# If we choose an invalid course, then we go to the profile to allow the user to add that course. The second validation failure seems to be about the ``for`` attribute of the ```<label class="readonly" for="auth_user_email" id="auth_user_email__label">`` tag, since the id ``auth_user_email`` isn't defined elsewhere.
("default/coursechooser/xxx", True, "Course IDs for open courses", 2),
("default/removecourse", True, "Course Selection", 1),
("default/removecourse/xxx", True, "Course Selection", 1),
(
"dashboard/studentreport",
True,
"Recent Activity",
1,
),
# **Designer**
# -------------
(
"designer/index",
True,
"This page allows you to select a book for your own class.",
1,
),
("designer/build", True, "Build a Custom", 1),
# **OAuth**
# ----------
(
"oauth/index",
False,
"This page is a utility for accepting redirects from external services like Spotify or LinkedIn that use oauth.",
1,
),
("books/index", False, "Runestone Test Book", 1),
("books/published", False, "Runestone Test Book", 1),
# TODO: Many other views!
],
)
def test_validate_user_pages(
url, requires_login, expected_string, expected_errors, test_client, test_user_1
):
if requires_login:
test_user_1.login()
else:
test_client.logout()
test_client.validate(url, expected_string, expected_errors)
# Validate the HTML in instructor-only pages.
# NOTE -- this is the start of a really really long decorator for test_2
@pytest.mark.parametrize(
"url, expected_string, expected_errors",
[
# **Default**
# ------------
# web2py-generated stuff produces two extra errors.
("default/bios", "Bios", 3),
# FIXME: The element ``<form id="editIndexRST" action="">`` in ``views/admin/admin.html`` produces the error ``Bad value \u201c\u201d for attribute \u201caction\u201d on element \u201cform\u201d: Must be non-empty.``.
#
# **Admin**
# ----------
("admin/admin", "Course Settings", 1),
# This endpoint produces JSON, so don't check it.
##("admin/course_students", '"test_user_1"', 2),
("admin/createAssignment", "ERROR", None),
("admin/grading", "assignment", 1),
# TODO: This produces an exception.
# ('admin/practice', 'Choose when students should start their practice.', 1),
# TODO: This deletes the course, making the test framework raise an exception. Need a separate case to catch this.
# ('admin/deletecourse', 'Manage Section', 2),
# FIXME: these raise an exception.
# ('admin/addinstructor', 'Trying to add non-user', 1), -- this is an api call
# ('admin/add_practice_items', 'xxx', 1), -- this is an api call
("admin/assignments", "Assignment", 6), # labels for hidden elements
# ('admin/backup', 'xxx', 1),
("admin/practice", "Choose when students should start", 1),
# ('admin/removeassign', 'Cannot remove assignment with id of', 1),
# ('admin/removeinstructor', 'xxx', 1),
# ('admin/removeStudents', 'xxx', 1),
("admin/get_assignment", "Error: assignment ID", 1),
("admin/get_assignment?assignmentid=junk", "Error: assignment ID", 1),
("admin/get_assignment?assignmentid=100", "Error: assignment ID", 1),
# TODO: added to the ``createAssignment`` endpoint so far.
# **Dashboard**
# --------------
("dashboard/index", "Instructor Dashboard", 1),
("dashboard/grades", "Gradebook", 1),
# TODO: This doesn't really test anything about either
# exercisemetrics or questiongrades other than properly handling a call with no information
("dashboard/exercisemetrics", "Instructor Dashboard", 1),
("dashboard/questiongrades", "Instructor Dashboard", 1),
],
)
def test_validate_instructor_pages(
url, expected_string, expected_errors, test_client, test_user, test_user_1
):
test_instructor_1 = test_user("test_instructor_1", "password_1", test_user_1.course)
test_instructor_1.make_instructor()
# Make sure that non-instructors are redirected.
test_client.logout()
test_client.validate(url, "Login")
test_user_1.login()
test_client.validate(url, "Insufficient privileges")
test_client.logout()
# Test the instructor results.
test_instructor_1.login()
test_client.validate(url, expected_string, expected_errors)
# Test the ``ajax/preview_question`` endpoint.
def test_preview_question(test_client, test_user_1):
preview_question = "ajax/preview_question"
# Passing no parameters should raise an error.
test_client.validate(preview_question, "Error: ")
# Passing something not JSON-encoded should raise an error.
test_client.validate(preview_question, "Error: ", data={"code": "xxx"})
# Passing invalid RST should produce a Sphinx warning.
test_client.validate(preview_question, "WARNING", data={"code": '"*hi"'})
# Passing valid RST with no Runestone component should produce an error.
test_client.validate(preview_question, "Error: ", data={"code": '"*hi*"'})
# Passing a string with Unicode should work. Note that π is U+03C0 (0x03c0 == 960); the JSON-encoded result will use \u03c0.
test_client.validate(
preview_question,
r"\u03c0",
data={
"code": json.dumps(
dedent(
"""\
.. fillintheblank:: question_1
Mary had a π.
- :x: Whatever.
"""
)
)
},
)
# Verify that ``question_1`` is not in the database. TODO: This passes even if the ``DBURL`` env variable in the ``ajax.py`` function ``preview_question`` isn't deleted. So, this test doesn't work.
db = test_user_1.runestone_db_tools.db
assert len(db(db.fitb_answers.div_id == "question_1").select()) == 0
# TODO: Add a test case for when the runestone build produces a non-zero return code.
# Test the ``default/user/profile`` endpoint.
def test_user_profile(test_client, test_user_1):
test_user_1.login()
runestone_db_tools = test_user_1.runestone_db_tools
course_name = "test_course_2"
test_course_2 = runestone_db_tools.create_course(course_name)
# Test a non-existent course.
test_user_1.update_profile(
expected_string="Errors in form", course_name="does_not_exist"
)
# Test an invalid e-mail address. TODO: This doesn't produce an error message.
##test_user_1.update_profile(expected_string='Errors in form',
## email='not a valid e-mail address')
# Change the user's profile data; add a new course.
username = "a_different_username"
first_name = "a different first"
last_name = "a different last"
email = "a_different_email@foo.com"
test_user_1.update_profile(
username=username,
first_name=first_name,
last_name=last_name,
email=email,
course_name=course_name,
accept_tcp="",
is_free=True,
)
# Check the values.
db = runestone_db_tools.db
user = db(db.auth_user.id == test_user_1.user_id).select().first()
# The username shouldn't be changeable.
assert user.username == test_user_1.username
assert user.first_name == first_name
assert user.last_name == last_name
# TODO: The e-mail address isn't updated.
# assert user.email == email
assert user.course_id == test_course_2.course_id
assert user.accept_tcp == False # noqa: E712
# TODO: I'm not sure where the section is stored.
# assert user.section == section
# Test that the course name is correctly preserved across registrations if other fields are invalid.
def test_registration(test_client, runestone_db_tools):
# Registration doesn't work unless we're logged out.
test_client.logout()
course_name = "a_course_name"
runestone_db_tools.create_course(course_name)
# Now, post the registration.
username = "username"
first_name = "first"
last_name = "last"
email = "e@mail.com"
password = "password"
test_client.validate(
"default/user/register",
"Please fix the following errors in your registration",
data=dict(
username=username,
first_name=first_name,
last_name=last_name,
# The e-mail address must be unique.
email=email,
password=password,
password_two=password + "oops",
# Note that ``course_id`` is (on the form) actually a course name.
course_id=course_name,
accept_tcp="on",
donate="0",
_next="/runestone/default/index",
_formname="register",
),
)
# Check that the pricing system works correctly.
def test_pricing(runestone_db_tools, runestone_env):
# Check the pricing.
default_controller = web2py_controller_import(runestone_env, "default")
db = runestone_db_tools.db
base_course = runestone_db_tools.create_course()
child_course = runestone_db_tools.create_course(
"test_child_course", base_course=base_course.course_name
)
# First, test on a base course.
for expected_price, actual_price in [(0, None), (0, -100), (0, 0), (15, 15)]:
db(db.courses.id == base_course.course_id).update(student_price=actual_price)
assert default_controller._course_price(base_course.course_id) == expected_price
    # Test in a child course as well. Create a matrix of all base course prices by all child course prices. A child course's price, when set, overrides the base course's; a missing or non-positive price means the course is free.
for expected_price, actual_base_price, actual_child_price in [
(0, None, None),
(0, None, 0),
(0, None, -1),
(2, None, 2),
(0, 0, None),
(0, 0, 0),
(0, 0, -1),
(2, 0, 2),
(0, -2, None),
(0, -2, 0),
(0, -2, -1),
(2, -2, 2),
(3, 3, None),
(0, 3, 0),
(0, 3, -1),
(2, 3, 2),
]:
db(db.courses.id == base_course.course_id).update(
student_price=actual_base_price
)
db(db.courses.id == child_course.course_id).update(
student_price=actual_child_price
)
assert (
default_controller._course_price(child_course.course_id) == expected_price
)
# Check that setting the price causes redirects to the correct location (payment vs. donation) when registering for a course or adding a new course.
def test_price_free(runestone_db_tools, test_user):
db = runestone_db_tools.db
course_1 = runestone_db_tools.create_course(student_price=0)
course_2 = runestone_db_tools.create_course("test_course_2", student_price=0)
# Check registering for a free course.
test_user_1 = test_user("test_user_1", "password_1", course_1, is_free=True)
# Verify the user was added to the ``user_courses`` table.
assert (
db(
(db.user_courses.course_id == test_user_1.course.course_id)
& (db.user_courses.user_id == test_user_1.user_id)
)
.select()
.first()
)
# Check adding a free course.
test_user_1.update_profile(course_name=course_2.course_name, is_free=True)
# Same as above.
assert (
db(
(db.user_courses.course_id == course_2.course_id)
& (db.user_courses.user_id == test_user_1.user_id)
)
.select()
.first()
)
def test_price_paid(runestone_db_tools, test_user):
db = runestone_db_tools.db
# Check registering for a paid course.
course_1 = runestone_db_tools.create_course(student_price=1)
course_2 = runestone_db_tools.create_course("test_course_2", student_price=1)
# Check registering for a paid course.
test_user_1 = test_user("test_user_1", "password_1", course_1, is_free=False)
# Until payment is provided, the user shouldn't be added to the ``user_courses`` table. Ensure that refresh, login/logout, profile changes, adding another class, etc. don't allow access.
test_user_1.test_client.logout()
test_user_1.login()
test_user_1.test_client.validate("default/index")
# Check adding a paid course.
test_user_1.update_profile(course_name=course_2.course_name, is_free=False)
# Verify no access without payment.
assert (
not db(
(db.user_courses.course_id == course_1.course_id)
& (db.user_courses.user_id == test_user_1.user_id)
)
.select()
.first()
)
assert (
not db(
(db.user_courses.course_id == course_2.course_id)
& (db.user_courses.user_id == test_user_1.user_id)
)
.select()
.first()
)
# Check that payments are handled correctly.
def test_payments(runestone_controller, runestone_db_tools, test_user):
if not runestone_controller.settings.STRIPE_SECRET_KEY:
pytest.skip("No Stripe keys provided.")
db = runestone_db_tools.db
course_1 = runestone_db_tools.create_course(student_price=100)
test_user_1 = test_user("test_user_1", "password_1", course_1, is_free=False)
def did_payment():
return (
db(
(db.user_courses.course_id == course_1.course_id)
& (db.user_courses.user_id == test_user_1.user_id)
)
.select()
.first()
)
# Test some failing tokens.
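    # (``tok_chargeCustomerFail`` and ``tok_chargeDeclined`` are Stripe's documented test tokens for failed charges; ``tok_visa`` below simulates a successful card.)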
assert not did_payment()
for token in ["tok_chargeCustomerFail", "tok_chargeDeclined"]:
test_user_1.make_payment(token)
assert not did_payment()
test_user_1.make_payment("tok_visa")
assert did_payment()
# Check that the payment record is correct.
payment = (
db(
(db.user_courses.user_id == test_user_1.user_id)
& (db.user_courses.course_id == course_1.course_id)
& (db.user_courses.id == db.payments.user_courses_id)
)
.select(db.payments.charge_id)
.first()
)
assert payment.charge_id
# Test the LP endpoint.
@pytest.mark.skipif(six.PY2, reason="Requires Python 3.")
def test_lp(test_user_1):
test_user_1.login()
# Check that omitting parameters produces an error.
ret = test_user_1.hsblog(event="lp_build")
assert "No feedback provided" in ret["errors"][0]
# Check that database entries are validated.
ret = test_user_1.hsblog(
event="lp_build",
# This div_id is too long. Everything else is OK.
div_id="X" * 1000,
course=test_user_1.course.course_name,
builder="unsafe-python",
answer=json.dumps({"code_snippets": ["def one(): return 1"]}),
)
assert "div_id" in ret["errors"][0]
# Check a passing case
def assert_passing():
ret = test_user_1.hsblog(
event="lp_build",
div_id="test_lp_1",
course=test_user_1.course.course_name,
builder="unsafe-python",
answer=json.dumps({"code_snippets": ["def one(): return 1"]}),
)
assert "errors" not in ret
assert ret["correct"] == 100
assert_passing()
# Send lots of jobs to test out the queue. Skip this for now -- not all the useinfo entries get deleted, which causes ``test_getNumOnline`` to fail.
if False:
threads = [Thread(target=assert_passing) for x in range(5)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Test dynamic book routing.
def test_dynamic_book_routing_1(test_client, test_user_1):
test_user_1.login()
dbr_tester(test_client, test_user_1, True)
# Test that a draft is accessible only to instructors.
test_user_1.make_instructor()
test_user_1.update_profile(course_name=test_user_1.course.course_name)
test_client.validate(
"books/draft/{}/index.html".format(test_user_1.course.base_course),
"The red car drove away.",
)
# Test the no-login case.
def test_dynamic_book_routing_2(test_client, test_user_1):
test_client.logout()
# Test for a book that doesn't require a login. First, change the book to not require a login.
db = test_user_1.runestone_db_tools.db
db(db.courses.course_name == test_user_1.course.base_course).update(
login_required=False
)
db.commit()
dbr_tester(test_client, test_user_1, False)
def dbr_tester(test_client, test_user_1, is_logged_in):
# Test error cases.
validate = test_client.validate
base_course = test_user_1.course.base_course
    # A non-existent course.
if is_logged_in:
validate("books/published/xxx", "Course Selection")
else:
validate("books/published/xxx", expected_status=404)
    # A non-existent page.
validate("books/published/{}/xxx".format(base_course), expected_status=404)
# A directory.
validate(
"books/published/{}/test_chapter_1".format(base_course), expected_status=404
)
# Attempt to access files outside a course.
validate("books/published/{}/../conf.py".format(base_course), expected_status=404)
# Attempt to access a course we're not registered for. TODO: Need to create another base course for this to work.
##if is_logged_in:
## #validate('books/published/{}/index.html'.format(base_course), [
## 'Sorry you are not registered for this course.'
## ])
# A valid page. Check the book config as well.
validate(
"books/published/{}/index.html".format(base_course),
[
"The red car drove away.",
"eBookConfig.course = '{}';".format(
test_user_1.course.course_name if is_logged_in else base_course
),
"eBookConfig.basecourse = '{}';".format(base_course),
],
)
# Drafts shouldn't be accessible by students.
validate(
"books/draft/{}/index.html".format(base_course),
"Insufficient privileges" if is_logged_in else "Username",
)
# Check routing in a base course.
if is_logged_in:
test_user_1.update_profile(
course_name=test_user_1.course.base_course, is_free=True
)
validate(
"books/published/{}/index.html".format(base_course),
[
"The red car drove away.",
"eBookConfig.course = '{}';".format(base_course),
"eBookConfig.basecourse = '{}';".format(base_course),
],
)
# Test static content.
validate(
"books/published/{}/_static/basic.css".format(base_course),
"Sphinx stylesheet -- basic theme.",
)
def test_assignments(test_client, runestone_db_tools, test_user):
course_3 = runestone_db_tools.create_course("test_course_3")
test_instructor_1 = test_user("test_instructor_1", "password_1", course_3)
test_instructor_1.make_instructor()
test_instructor_1.login()
db = runestone_db_tools.db
name_1 = "test_assignment_1"
name_2 = "test_assignment_2"
name_3 = "test_assignment_3"
# Create an assignment -- using createAssignment
test_client.post("admin/createAssignment", data=dict(name=name_1))
assign1 = (
db(
(db.assignments.name == name_1)
& (db.assignments.course == test_instructor_1.course.course_id)
)
.select()
.first()
)
assert assign1
# Make sure you can't create two assignments with the same name
test_client.post("admin/createAssignment", data=dict(name=name_1))
assert "EXISTS" in test_client.text
# Rename assignment
test_client.post("admin/createAssignment", data=dict(name=name_2))
assign2 = (
db(
(db.assignments.name == name_2)
& (db.assignments.course == test_instructor_1.course.course_id)
)
.select()
.first()
)
assert assign2
test_client.post(
"admin/renameAssignment", data=dict(name=name_3, original=assign2.id)
)
assert db(db.assignments.name == name_3).select().first()
assert not db(db.assignments.name == name_2).select().first()
# Make sure you can't rename an assignment to an already used assignment
test_client.post(
"admin/renameAssignment", data=dict(name=name_3, original=assign1.id)
)
assert "EXISTS" in test_client.text
    # Delete an assignment -- using removeassign
test_client.post("admin/removeassign", data=dict(assignid=assign1.id))
assert not db(db.assignments.name == name_1).select().first()
test_client.post("admin/removeassign", data=dict(assignid=assign2.id))
assert not db(db.assignments.name == name_3).select().first()
test_client.post("admin/removeassign", data=dict(assignid=9999999))
assert "Error" in test_client.text
def test_instructor_practice_admin(test_client, runestone_db_tools, test_user):
course_4 = runestone_db_tools.create_course("test_course_1")
test_student_1 = test_user("test_student_1", "password_1", course_4)
test_student_1.logout()
test_instructor_1 = test_user("test_instructor_1", "password_1", course_4)
test_instructor_1.make_instructor()
test_instructor_1.login()
db = runestone_db_tools.db
course_start_date = datetime.datetime.strptime(
course_4.term_start_date, "%Y-%m-%d"
).date()
start_date = course_start_date + datetime.timedelta(days=13)
end_date = datetime.datetime.today().date() + datetime.timedelta(days=30)
max_practice_days = 40
max_practice_questions = 400
day_points = 1
question_points = 0.2
questions_to_complete_day = 5
graded = 0
# Test the practice tool settings for the course.
flashcard_creation_method = 2
test_client.post(
"admin/practice",
data={
"StartDate": start_date,
"EndDate": end_date,
"graded": graded,
"maxPracticeDays": max_practice_days,
"maxPracticeQuestions": max_practice_questions,
"pointsPerDay": day_points,
"pointsPerQuestion": question_points,
"questionsPerDay": questions_to_complete_day,
"flashcardsCreationType": 2,
"question_points": question_points,
},
)
practice_settings_1 = (
db(
(db.course_practice.auth_user_id == test_instructor_1.user_id)
& (db.course_practice.course_name == course_4.course_name)
& (db.course_practice.start_date == start_date)
& (db.course_practice.end_date == end_date)
& (
db.course_practice.flashcard_creation_method
== flashcard_creation_method
)
& (db.course_practice.graded == graded)
)
.select()
.first()
)
assert practice_settings_1
if practice_settings_1.spacing == 1:
assert practice_settings_1.max_practice_days == max_practice_days
assert practice_settings_1.day_points == day_points
assert (
practice_settings_1.questions_to_complete_day == questions_to_complete_day
)
else:
assert practice_settings_1.max_practice_questions == max_practice_questions
assert practice_settings_1.question_points == question_points
# Test instructor adding a subchapter to the practice tool for students.
# I need to call set_tz_offset to set timezoneoffset in the session.
test_client.post("ajax/set_tz_offset", data={"timezoneoffset": 0})
# The reason I'm manually stringifying the list value is that test_client.post does something strange with compound objects instead of passing them to json.dumps.
test_client.post(
"admin/add_practice_items",
data={"data": '["1. Test chapter 1/1.2 Subchapter B"]'},
)
practice_settings_1 = (
db(
(db.user_topic_practice.user_id == test_student_1.user_id)
& (db.user_topic_practice.course_name == course_4.course_name)
& (db.user_topic_practice.chapter_label == "test_chapter_1")
& (db.user_topic_practice.sub_chapter_label == "subchapter_b")
)
.select()
.first()
)
assert practice_settings_1
def test_deleteaccount(test_client, runestone_db_tools, test_user):
course_3 = runestone_db_tools.create_course("test_course_3")
the_user = test_user("user_to_delete", "password_1", course_3)
the_user.login()
validate = the_user.test_client.validate
the_user.hsblog(
event="mChoice",
act="answer:1:correct",
answer="1",
correct="T",
div_id="subc_b_1",
course="test_course_3",
)
validate("default/delete", "About Runestone", data=dict(deleteaccount="checked"))
db = runestone_db_tools.db
res = db(db.auth_user.username == "user_to_delete").select().first()
print(res)
time.sleep(2)
assert not db(db.useinfo.sid == "user_to_delete").select().first()
assert not db(db.code.sid == "user_to_delete").select().first()
for t in [
"clickablearea",
"codelens",
"dragndrop",
"fitb",
"lp",
"mchoice",
"parsons",
"shortanswer",
]:
assert (
not db(db["{}_answers".format(t)].sid == "user_to_delete").select().first()
)
# Test the grades report.
# When this test fails it is very difficult to figure out why: the data
# structures being compared are very large, which makes it hard to pin down
# what is failing. In addition there seems to be a dictionary in here
# somewhere whose ordering shifts around. I think it is currently broken
# because more components now return a percent-correct value.
@pytest.mark.skip(reason="TODO: This test is unpredictable and needs to be updated.")
def test_grades_1(runestone_db_tools, test_user, tmp_path):
# Create test users.
course = runestone_db_tools.create_course()
course_name = course.course_name
# **Create test data**
# ======================
# Create test users.
test_user_array = [
test_user(
"test_user_{}".format(index), "x", course, last_name="user_{}".format(index)
)
for index in range(4)
]
def assert_passing(index, *args, **kwargs):
res = test_user_array[index].hsblog(*args, **kwargs)
assert "errors" not in res
# Prepare common arguments for each question type.
shortanswer_kwargs = dict(
event="shortanswer", div_id="test_short_answer_1", course=course_name
)
fitb_kwargs = dict(event="fillb", div_id="test_fitb_1", course=course_name)
mchoice_kwargs = dict(event="mChoice", div_id="test_mchoice_1", course=course_name)
lp_kwargs = dict(
event="lp_build",
div_id="test_lp_1",
course=course_name,
builder="unsafe-python",
)
unittest_kwargs = dict(event="unittest", div_id="units2", course=course_name)
# *User 0*: no data supplied
##----------------------------
# *User 1*: correct answers
##---------------------------
# It doesn't matter which user logs out, since all three users share the same client.
logout = test_user_array[2].test_client.logout
logout()
test_user_array[1].login()
assert_passing(1, act=test_user_array[1].username, **shortanswer_kwargs)
assert_passing(1, answer=json.dumps(["red", "away"]), **fitb_kwargs)
assert_passing(1, answer="0", correct="T", **mchoice_kwargs)
assert_passing(
1, answer=json.dumps({"code_snippets": ["def one(): return 1"]}), **lp_kwargs
)
assert_passing(1, act="percent:100:passed:2:failed:0", **unittest_kwargs)
# *User 2*: incorrect answers
##----------------------------
logout()
test_user_array[2].login()
# Add three shortanswer answers, to make sure the number of attempts is correctly recorded.
for x in range(3):
assert_passing(2, act=test_user_array[2].username, **shortanswer_kwargs)
assert_passing(2, answer=json.dumps(["xxx", "xxxx"]), **fitb_kwargs)
assert_passing(2, answer="1", correct="F", **mchoice_kwargs)
assert_passing(
2, answer=json.dumps({"code_snippets": ["def one(): return 2"]}), **lp_kwargs
)
assert_passing(2, act="percent:50:passed:1:failed:1", **unittest_kwargs)
# *User 3*: no data supplied, and no longer in course.
##----------------------------------------------------
# Wait until the autograder is run to remove the student, so they will have a grade but not have any submissions.
# **Test the grades_report endpoint**
##====================================
tu = test_user_array[2]
def grades_report(assignment, *args, **kwargs):
return tu.test_client.validate(
"assignments/grades_report",
*args,
data=dict(chap_or_assign=assignment, report_type="assignment"),
**kwargs
)
# Test not being an instructor.
grades_report("", "About Runestone")
tu.make_instructor()
# Test an invalid assignment.
grades_report("", "Unknown assignment")
# Create an assignment.
assignment_name = "test_assignment"
assignment_id = json.loads(
tu.test_client.validate(
"admin/createAssignment", data={"name": assignment_name}
)
)[assignment_name]
assignment_kwargs = dict(
assignment=assignment_id, autograde="pct_correct", which_to_grade="first_answer"
)
# Add questions to the assignment.
def add_to_assignment(question_kwargs, points):
assert (
tu.test_client.validate(
"admin/add__or_update_assignment_question",
data=dict(
question=question_kwargs["div_id"],
points=points,
**assignment_kwargs
),
)
!= json.dumps("Error")
)
# Determine the order of the questions and the _`point values`.
add_to_assignment(shortanswer_kwargs, 0)
add_to_assignment(fitb_kwargs, 1)
add_to_assignment(mchoice_kwargs, 2)
add_to_assignment(lp_kwargs, 3)
add_to_assignment(unittest_kwargs, 4)
# Autograde the assignment.
assignment_kwargs = dict(data={"assignment": assignment_name})
assert json.loads(
tu.test_client.validate("assignments/autograde", **assignment_kwargs)
)["message"].startswith("autograded")
assert json.loads(
tu.test_client.validate("assignments/calculate_totals", **assignment_kwargs)
)["success"]
# Remove test user 3 from the course. They can't be removed from the current course, so create a new one then add this user to it.
logout()
tu = test_user_array[3]
tu.login()
new_course = runestone_db_tools.create_course("random_course_name")
tu.update_profile(course_name=new_course.course_name, is_free=True)
tu.coursechooser(new_course.course_name)
tu.removecourse(course_name)
# **Test this assignment.**
# ===========================
# Log back in as the instructor.
logout()
tu = test_user_array[2]
tu.login()
# Now, we can get the report.
grades = json.loads(grades_report(assignment_name))
# Define a regex string comparison.
class RegexEquals:
def __init__(self, regex):
self.regex = re.compile(regex)
def __eq__(self, other):
return bool(re.search(self.regex, other))
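    # For example (illustrative): ``RegexEquals(r"\d+") == "abc123"`` is True,
    # because ``re.search`` matches anywhere in the string.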
# See if a date in ISO format followed by a "Z" is close to the current time.
class AlmostNow:
def __eq__(self, other):
# Parse the date string. Assume it ends with a Z and discard this.
assert other and other[-1] == "Z"
# Per the `docs <https://docs.python.org/3/library/datetime.html#datetime.date.fromisoformat>`_, this function requires Python 3.7+.
if sys.version_info >= (3, 7):
dt = datetime.datetime.fromisoformat(other[:-1])
return datetime.datetime.utcnow() - dt < datetime.timedelta(minutes=1)
else:
# Hope for the best on older Python.
return True
# These are based on the data input for each user earlier in this test.
expected_grades = {
"colHeaders": [
"userid",
"Family name",
"Given name",
"e-mail",
"avg grade (%)",
"1",
"1",
"1",
"2.1",
"2",
],
"data": [
[
"div_id",
"",
"",
"",
"",
"test_short_answer_1",
"test_fitb_1",
"test_mchoice_1",
"test_lp_1",
"units2",
],
[
"location",
"",
"",
"",
"",
"index - ",
"index - ",
"index - ",
"lp_demo.py - ",
"index - ",
],
[
"type",
"",
"",
"",
"",
"shortanswer",
"fillintheblank",
"mchoice",
"lp_build",
"activecode",
],
# See the `point values`_ assigned earlier.
["points", "", "", "", "", 0, 1, 2, 3, 4],
["avg grade (%)", "", "", "", ""],
["avg attempts", "", "", "", ""],
["test_user_0", "user_0", "test", "test_user_0@foo.com", 0.0],
["test_user_1", "user_1", "test", "test_user_1@foo.com", 1.0],
["test_user_2", "user_2", "test", "test_user_2@foo.com", 0.2],
["test_user_3", "user_3", "test", "test_user_3@foo.com", 0.0],
],
# Correct since the first 3 questions are all on the index page.
"mergeCells": [{"col": 5, "colspan": 3, "row": 1, "rowspan": 1}],
"orig_data": [
# User 0: not submitted.
[
# The format is:
# ``[timestamp, score, answer, correct, num_attempts]``.
[None, 0.0, None, None, None], # shortanswer
[None, 0.0, None, None, None], # fillintheblank
[None, 0.0, None, None, None], # mchoice
[None, 0.0, {}, None, None], # lp_build
[None, 0.0, "", None, None], # activecode
],
# User 1: all correct.
[
[AlmostNow(), 0.0, "test_user_1", None, 1],
[AlmostNow(), 1.0, ["red", "away"], True, 1],
[AlmostNow(), 2.0, [0], True, 1],
[
AlmostNow(),
3.0,
{"code_snippets": ["def one(): return 1"], "resultString": ""},
100.0,
1,
],
[AlmostNow(), 4.0, "percent:100:passed:2:failed:0", True, 1],
],
# User 2: all incorrect.
[
[AlmostNow(), 0.0, "test_user_2", None, 3],
[AlmostNow(), 0.0, ["xxx", "xxxx"], False, 1],
[AlmostNow(), 0.0, [1], False, 1],
[
AlmostNow(),
0.0,
{
"code_snippets": ["def one(): return 2"],
"resultString": RegexEquals(
"Traceback \\(most recent call last\\):\n"
" File "
# Use a regex for the file's path.
'"\\S*lp_demo-test.py", '
"line 6, in <module>\n"
" assert one\\(\\) == 1\n"
"AssertionError"
),
},
0.0,
1,
],
[AlmostNow(), 2.0, "percent:50:passed:1:failed:1", False, 1],
],
# User 3: not submitted.
[
# The format is:
[None, 0.0, None, None, None],
[None, 0.0, None, None, None],
[None, 0.0, None, None, None],
[None, 0.0, {}, None, None],
[None, 0.0, "", None, None],
],
],
}
# Note: on test failure, pytest will report as incorrect all the ``AlmostNow()`` and ``RegexEquals`` items, even though they may have actually compared as equal.
# assert grades == expected_grades
    # Let's break this up a bit.
for k in expected_grades:
assert grades[k] == expected_grades[k]
logout()
# Test with no login.
grades_report("", "About Runestone")
def test_pageprogress(test_client, runestone_db_tools, test_user_1):
test_user_1.login()
test_user_1.hsblog(
event="mChoice",
act="answer:1:correct",
answer="1",
correct="T",
div_id="subc_b_1",
course=test_user_1.course.course_name,
)
    # Since the user has answered the question, the count for subc_b_1 should be 1.
    # We cannot test the totals on the client without JavaScript, but that is
    # covered by the Selenium tests on the components side.
test_user_1.test_client.validate(
"books/published/{}/test_chapter_1/subchapter_b.html".format(
test_user_1.course.base_course
),
'"subc_b_1": 1',
)
assert '"LearningZone_poll": 0' in test_user_1.test_client.text
assert '"subc_b_fitb": 0' in test_user_1.test_client.text
def test_lockdown(test_client, test_user_1):
test_user_1.login()
base_course = test_user_1.course.base_course
res = test_client.validate("books/published/{}/index.html".format(base_course))
assert '/default/user/login"> </a>' in res
assert "Runestone in social media:" in res
assert ">Change Course</a></li>" in res
assert 'id="profilelink">Edit' in res
assert '<ul class="dropdown-menu user-menu">' in res
assert "<span id='numuserspan'></span><span class='loggedinuser'></span>" in res
assert '<script async src="https://hypothes.is/embed.js"></script>' in res
|
telepoints.py
|
import sys
import telepot
from telepot.delegate import per_chat_id_in, call, create_open
import settings
from peewee import *
"""
telepoints.py
"""
# Simulate a database to store unread messages
class UnreadStore(object):
def __init__(self):
self._db = {}
def put(self, msg):
chat_id = msg['chat']['id']
if chat_id not in self._db:
self._db[chat_id] = []
self._db[chat_id].append(msg)
# Pull all unread messages of a `chat_id`
def pull(self, chat_id):
messages = self._db[chat_id]
del self._db[chat_id]
# sort by date
messages.sort(key=lambda m: m['date'])
return messages
# Tells how many unread messages per chat_id
def unread_per_chat(self):
return [(k,len(v)) for k,v in self._db.items()]
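# Illustrative only (not part of the bot): UnreadStore used in isolation.
#   store = UnreadStore()
#   store.put({'chat': {'id': 42}, 'date': 2, 'text': 'there'})
#   store.put({'chat': {'id': 42}, 'date': 1, 'text': 'hi'})
#   store.unread_per_chat()              # -> [(42, 2)]
#   [m['text'] for m in store.pull(42)]  # -> ['hi', 'there'] (sorted by date)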
# Accept commands from owner. Give him unread messages.
class OwnerHandler(telepot.helper.ChatHandler):
def __init__(self, seed_tuple, timeout, store):
super(OwnerHandler, self).__init__(seed_tuple, timeout)
self._store = store
def _read_messages(self, messages):
for msg in messages:
# assume all messages are text
self.sender.sendMessage(msg['text'])
def on_message(self, msg):
content_type, chat_type, chat_id = telepot.glance2(msg)
if content_type != 'text':
self.sender.sendMessage("I don't understand")
return
command = msg['text'].strip().lower()
# Tells who has sent you how many messages
if command == '/unread':
results = self._store.unread_per_chat()
lines = []
for r in results:
n = 'ID: %d\n%d unread' % r
lines.append(n)
if not len(lines):
self.sender.sendMessage('No unread messages')
else:
self.sender.sendMessage('\n'.join(lines))
# read next sender's messages
elif command == '/next':
results = self._store.unread_per_chat()
if not len(results):
self.sender.sendMessage('No unread messages')
return
chat_id = results[0][0]
unread_messages = self._store.pull(chat_id)
self.sender.sendMessage('From ID: %d' % chat_id)
self._read_messages(unread_messages)
else:
self.sender.sendMessage("I don't understand")
class MessageSaver(telepot.helper.Monitor):
def __init__(self, seed_tuple, store, exclude):
# The `capture` criteria means to capture all messages.
super(MessageSaver, self).__init__(seed_tuple, capture=[{'_': lambda msg: True}])
self._store = store
self._exclude = exclude
# Store every message, except those whose sender is in the exclude list, or non-text messages.
def on_message(self, msg):
content_type, chat_type, chat_id = telepot.glance2(msg)
if chat_id in self._exclude:
print('Chat id %d is excluded.' % chat_id)
return
if content_type != 'text':
print('Content type %s is ignored.' % content_type)
return
print('Storing message: %s' % msg)
self._store.put(msg)
import threading
class CustomThread(threading.Thread):
def start(self):
print('CustomThread starting ...')
super(CustomThread, self).start()
# Note how this function wraps around the `call()` function below to implement
# a custom thread for delegation.
def custom_thread(func):
def f(seed_tuple):
target = func(seed_tuple)
if type(target) is tuple:
run, args, kwargs = target
t = CustomThread(target=run, args=args, kwargs=kwargs)
else:
t = CustomThread(target=target)
return t
return f
class ChatBox(telepot.DelegatorBot):
def __init__(self, token, owner_id):
self._owner_id = owner_id
self._seen = set()
self._store = UnreadStore()
super(ChatBox, self).__init__(token, [
# Here is a delegate to specially handle owner commands.
(per_chat_id_in([owner_id]), create_open(OwnerHandler, 20, self._store)),
# Seed is always the same, meaning only one MessageSaver is ever spawned for entire application.
(lambda msg: 1, create_open(MessageSaver, self._store, exclude=[owner_id])),
# For senders never seen before, send him a welcome message.
(self._is_newcomer, custom_thread(call(self._send_welcome))),
])
# seed-calculating function: use returned value to indicate whether to spawn a delegate
def _is_newcomer(self, msg):
chat_id = msg['chat']['id']
if chat_id in self._seen: # Sender has been seen before
# execute statemachine
return None # No delegate spawned
self._seen.add(chat_id)
return [] # non-hashable ==> delegates are independent, no seed association is made.
def _send_welcome(self, seed_tuple):
chat_id = seed_tuple[1]['chat']['id']
print('Sending welcome ...')
self.sendMessage(chat_id, 'Hello!')
TOKEN = settings.TOKEN # get token from settings file
OWNER_ID = settings.OWNER
bot = ChatBox(TOKEN, OWNER_ID)
bot.notifyOnMessage(run_forever=True)
|
base_consumer.py
|
from multiprocessing import Process
from confluent_kafka import Consumer
from abc import ABC, abstractmethod, abstractproperty
import sys
import ast
import logging
from functools import wraps
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('consumer')
def multiprocess(fn):
@wraps(fn)
def call(*args, **kwargs):
p = Process(target=fn, args=args, kwargs=kwargs)
p.start()
return p
return call
class BaseConsumer(ABC):
@abstractproperty
def topic(self):
pass
@abstractproperty
def group_id(self):
pass
@abstractmethod
def on_data(self, data):
pass
def __init__(self):
self.config = {
'bootstrap.servers': 'localhost:9093',
'group.id': self.group_id,
'auto.offset.reset': 'smallest',
}
self.running = True
@multiprocess
def listen(self):
logger.info("Starting consumer... {}".format(self.__class__.__name__))
consumer = Consumer(self.config)
try:
consumer.subscribe([self.topic])
while self.running:
msg = consumer.poll(1.0)
if msg is None:
continue
if msg.error():
logger.error("Consumer error: {}".format(msg.error()))
continue
logger.info('Received message: {}; Group id: {}'.format(msg.value().decode('utf-8'), self.group_id))
                self.on_data(self.parse_data(msg.value().decode('utf-8')))
except KeyboardInterrupt:
logger.info("Exiting...")
sys.exit(1)
finally:
consumer.close()
    def parse_data(self, data):
        # ``ast.literal_eval`` turns the string payload back into a Python
        # object; if that fails, log the error and fall back to the raw string.
        try:
            return ast.literal_eval(data)
        except Exception as e:
            logger.error("Error: {}".format(e))
            return data
def shutdown(self):
self.running = False
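# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of a concrete consumer, assuming a Kafka topic named
# "example-topic" exists on the broker configured above; the topic, group id
# and handler below are hypothetical.
class ExampleConsumer(BaseConsumer):
    topic = 'example-topic'
    group_id = 'example-group'
    def on_data(self, data):
        # ``data`` is whatever ``parse_data`` produced for the message value.
        logger.info("Handling parsed payload: {}".format(data))
if __name__ == '__main__':
    worker = ExampleConsumer()
    process = worker.listen()  # the @multiprocess decorator returns the started Process
    process.join()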
|
daemon.py
|
import multiprocessing as mp
import os.path
import time
from logging import Logger
from os import walk
from typing import List
from watchdog.events import RegexMatchingEventHandler
from watchdog.observers import Observer
from monitor.config import DaemonConfig
from monitor.file_processor import file_processor
class ProcessPool:
"""Helper class to start multiple processes"""
_pool: List[mp.Process]
def __init__(self, size, target=None, name=None, daemon=None, args=(), kwargs={}) -> None:
self._pool = [mp.Process(target=target, name=name, daemon=daemon, args=args, kwargs=kwargs) for _ in range(size)]
def start(self):
for p in self._pool: p.start()
def close(self):
for p in self._pool: p.close()
def terminate(self):
for p in self._pool: p.terminate()
def join(self):
for p in self._pool: p.join()
def is_alive(self):
return all(p.is_alive() for p in self._pool)
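# Illustrative only: a minimal sketch of ProcessPool usage with a throwaway
# worker function; ``_echo`` and the pool size are hypothetical and not part
# of the monitor package. Kept as a comment so importing this module stays
# side-effect free.
#   def _echo(label):
#       print('worker says', label)
#   pool = ProcessPool(4, target=_echo, args=('hello',))
#   pool.start()
#   pool.join()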
class Daemon:
"""Class to monitor a directory and parse any files in it"""
config: DaemonConfig
logger: Logger
_queue: mp.Queue
def __init__(self, config: DaemonConfig, logger: Logger) -> None:
self.config = config
self.logger = logger
self._queue = mp.Queue()
def run(self, path: str):
observer = processor = None
try:
observer = self._start_file_observer(path)
processor = self._start_file_processor()
# Loop through existing files in the path and add them to the queue
self._queue_existing_files(path)
while True:
time.sleep(1)
                # If either process has died, terminate
if not observer.is_alive() or not processor.is_alive():
break
except KeyboardInterrupt:
self.logger.info('stopping')
finally:
# Kill sub processes and wait until they stop
if observer and observer.is_alive():
observer.stop()
observer.join()
if processor and processor.is_alive():
processor.terminate()
processor.join()
processor.close()
def _get_on_create_handler(self):
def on_create(event):
self.logger.info('monitoring \'{}\' for steady state'.format(event.src_path))
new_size = os.path.getsize(event.src_path)
while True:
old_size = new_size
time.sleep(5)
new_size = os.path.getsize(event.src_path)
if old_size == new_size:
self.logger.info('monitoring \'{}\' finished'.format(event.src_path))
break
self._queue.put(event.src_path)
return on_create
def _start_file_observer(self, path: str) -> Observer:
event_handler = RegexMatchingEventHandler(
            regexes=[r'^.*\.aax$'],
ignore_regexes=[],
ignore_directories=True,
case_sensitive=False,
)
event_handler.on_created = self._get_on_create_handler()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
self.logger.info('watching \'{}\''.format(path))
observer.start()
return observer
def _start_file_processor(self) -> ProcessPool:
self.logger.info('Starting file processor')
lock = mp.Lock()
pool = ProcessPool(
self.config.threads,
target=file_processor,
args=(self.config, self._queue, lock, self.logger.level))
pool.start()
return pool
def _queue_existing_files(self, path: str):
for (dirpath, _, filenames) in walk(path):
for file in filenames:
if len(file) > 4 and file[-4:].lower() == '.aax':
self._queue.put(os.path.join(dirpath, file))
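# Illustrative wiring (not part of the module): how a caller might start the
# daemon; building the DaemonConfig is project specific and not shown here.
#   import logging
#   config = ...  # a DaemonConfig instance built elsewhere in the package
#   Daemon(config, logging.getLogger('monitor')).run('/path/to/watch')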
|
messagebus_test.py
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This is a unittest for the message buss
It's important to note that this requires this test to run mycroft service
to test the buss. It is not expected that the service be already running
when the tests are ran.
"""
import time
import unittest
from subprocess import Popen, call
from threading import Thread
from mycroft.messagebus.client import MessageBusClient
from mycroft.messagebus.message import Message
class TestMessagebusMethods(unittest.TestCase):
"""This class is for testing the messsagebus.
It currently only tests send and receive. The tests could include
more.
"""
def setUp(self):
"""
This sets up for testing the message buss
This requires starting the mycroft service and creating two
WebsocketClient object to talk with eachother. Not this is
threaded and will require cleanup
"""
        # Start the mycroft service and get the pid of the script.
self.pid = Popen(["python", "mycroft/messagebus/service/main.py"]).pid
# Create the two web clients
self.ws1 = MessageBusClient()
self.ws2 = MessageBusClient()
        # Init the flags for the handlers.
self.handle1 = False
self.handle2 = False
# Start threads to handle websockets
Thread(target=self.ws1.run_forever).start()
Thread(target=self.ws2.run_forever).start()
# Setup handlers for each of the messages.
self.ws1.on('ws1.message', self.onHandle1)
self.ws2.on('ws2.message', self.onHandle2)
def onHandle1(self, event):
"""This is the handler for ws1.message
This for now simply sets a flag to true when received.
Args:
event(Message): this is the message received
"""
self.handle1 = True
def onHandle2(self, event):
"""This is the handler for ws2.message
This for now simply sets a flag to true when received.
Args:
event(Message): this is the message received
"""
self.handle2 = True
def tearDown(self):
"""This is the clean up for the tests
This will close the websockets ending the threads then kill the
mycroft service that was started in setUp.
"""
self.ws1.close()
self.ws2.close()
retcode = call(["kill", "-9", str(self.pid)])
def test_ClientServer(self):
"""This is the test to send a message from each of the websockets
to the other.
"""
# Send the messages
self.ws2.emit(Message('ws1.message'))
self.ws1.emit(Message('ws2.message'))
# allow time for messages to be processed
time.sleep(0.2)
# Check that both of the handlers were called.
self.assertTrue(self.handle1)
self.assertTrue(self.handle2)
class TestMessageMethods(unittest.TestCase):
"""This tests the Message class functions
"""
def setUp(self):
"""This sets up some basic messages for testing.
"""
self.empty_message = Message("empty")
self.message1 = Message("enclosure.reset")
self.message2 = Message("enclosure.system.blink",
{'target': 4}, {'target': 5})
self.message3 = Message("status", "OK")
# serialized results of each of the messages
self.serialized = ['{"data": {}, "type": "empty", "context": null}',
'{"data": {}, "type": "enclosure.reset",\
"context": null}',
'{"data": { "target": 4}, \
"type": "enclosure.system.blink", \
"context": {"target": 5}}',
'{"data": "OK", "type": "status", \
"context": null}']
def test_serialize(self):
"""This test the serialize method
"""
self.assertEqual(self.empty_message.serialize(), self.serialized[0])
self.assertEqual(self.message1.serialize(), self.serialized[1])
self.assertEqual(self.message2.serialize(), self.serialized[2])
self.assertEqual(self.message3.serialize(), self.serialized[3])
def test_deserialize(self):
"""This test's the deserialize method
"""
messages = []
# create the messages from the serialized strings above
messages.append(Message.deserialize(self.serialized[0]))
messages.append(Message.deserialize(self.serialized[1]))
messages.append(Message.deserialize(self.serialized[2]))
# check the created messages match the strings
self.assertEqual(messages[0].serialize(), self.serialized[0])
self.assertEqual(messages[1].serialize(), self.serialized[1])
self.assertEqual(messages[2].serialize(), self.serialized[2])
def test_reply(self):
"""This tests the reply method
This is probably incomplete as the use of the reply message escapes me.
"""
message = self.empty_message.reply("status", "OK")
self.assertEqual(message.serialize(),
'{"data": "OK", "type": "status", "context": {}}')
message = self.message1.reply("status", "OK")
self.assertEqual(message.serialize(),
'{"data": "OK", "type": "status", "context": {}}')
message = self.message2.reply("status", "OK")
def test_publish(self):
"""This is for testing the publish method
TODO: Needs to be completed
"""
pass
if __name__ == '__main__':
"""This is to start the testing"""
unittest.main()
|
main.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The main training script."""
import multiprocessing
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import dataloader
import det_model_fn
import hparams_config
import utils
flags.DEFINE_string(
'tpu',
default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
'url.')
flags.DEFINE_string(
'gcp_project',
default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone',
default=None,
help='GCE zone where the Cloud TPU is located in. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string('eval_name', default=None, help='Eval job name')
flags.DEFINE_enum('strategy', None, ['tpu', 'gpus', ''],
'Training: gpus for multi-gpu, if None, use TF default.')
flags.DEFINE_bool('use_fake_data', False, 'Use fake input.')
flags.DEFINE_bool(
'use_xla', False,
'Use XLA even if strategy is not tpu. If strategy is tpu, always use XLA, '
'and this flag has no effect.')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string(
'backbone_ckpt', '', 'Location of the ResNet50 checkpoint to use for model '
'initialization.')
flags.DEFINE_string('ckpt', None,
'Start training from this EfficientDet checkpoint.')
flags.DEFINE_string(
'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module'
' containing attributes to use as hyperparameters.')
flags.DEFINE_integer(
'num_cores', default=8, help='Number of TPU cores for training')
flags.DEFINE_bool('use_spatial_partition', False, 'Use spatial partition.')
flags.DEFINE_integer(
'num_cores_per_replica',
default=4,
help='Number of TPU cores per replica when using spatial partition.')
flags.DEFINE_multi_integer(
'input_partition_dims', [1, 2, 2, 1],
'A list that describes the partition dims for all the tensors.')
flags.DEFINE_integer('train_batch_size', 64, 'global training batch size')
flags.DEFINE_integer('eval_batch_size', 1, 'global evaluation batch size')
flags.DEFINE_integer('eval_samples', None, 'Number of samples for eval.')
flags.DEFINE_integer('iterations_per_loop', 100,
'Number of iterations per TPU training loop')
flags.DEFINE_integer('save_checkpoints_steps', 100,
'Number of iterations per checkpoint save')
flags.DEFINE_string(
'training_file_pattern', None,
'Glob for training data files (e.g., COCO train - minival set)')
flags.DEFINE_string('validation_file_pattern', None,
'Glob for evaluation tfrecords (e.g., COCO val2017 set)')
flags.DEFINE_string(
'val_json_file', None,
'COCO validation JSON containing golden bounding boxes. If None, use the '
'ground truth from the dataloader. Ignored if testdev_dir is not None.')
flags.DEFINE_string('testdev_dir', None,
                    'COCO testdev dir. If not None, ignore val_json_file.')
flags.DEFINE_integer('num_examples_per_epoch', 120000,
'Number of examples in one epoch')
flags.DEFINE_integer('num_epochs', None, 'Number of epochs for training')
flags.DEFINE_string('mode', 'train',
'Mode to run: train or eval (default: train)')
flags.DEFINE_string('model_name', 'efficientdet-d1', 'Model name.')
flags.DEFINE_bool('eval_after_training', False, 'Run one eval after the '
'training finishes.')
flags.DEFINE_bool('profile', False, 'Profile training performance.')
flags.DEFINE_integer(
'tf_random_seed', None, 'Sets the TF graph seed for deterministic execution'
' across runs (for debugging).')
# For Eval mode
flags.DEFINE_integer('min_eval_interval', 180,
'Minimum seconds between evaluations.')
flags.DEFINE_integer(
'eval_timeout', None,
'Maximum seconds between checkpoints before evaluation terminates.')
# for train_and_eval mode
flags.DEFINE_bool(
'run_epoch_in_child_process', True,
    'This option helps to rectify a CPU memory leak. If True, every epoch is '
    'run in a separate process for train and eval and memory will be cleared. '
    'Drawback: need to kill 2 processes if training needs to be interrupted.')
FLAGS = flags.FLAGS
def main(_):
if FLAGS.strategy == 'tpu':
tf.disable_eager_execution()
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
tpu_grpc_url = tpu_cluster_resolver.get_master()
tf.Session.reset(tpu_grpc_url)
else:
tpu_cluster_resolver = None
# Check data path
if FLAGS.mode in ('train', 'train_and_eval'):
if FLAGS.training_file_pattern is None:
raise RuntimeError('Must specify --training_file_pattern for train.')
if FLAGS.mode in ('eval', 'train_and_eval'):
if FLAGS.validation_file_pattern is None:
raise RuntimeError('Must specify --validation_file_pattern for eval.')
# Parse and override hparams
config = hparams_config.get_detection_config(FLAGS.model_name)
config.override(FLAGS.hparams)
if FLAGS.num_epochs: # NOTE: remove this flag after updating all docs.
config.num_epochs = FLAGS.num_epochs
# Parse image size in case it is in string format.
config.image_size = utils.parse_image_size(config.image_size)
  # The following is for spatial partitioning. `features` has one tensor while
  # `labels` has 4 + (`max_level` - `min_level` + 1) * 2 tensors. The input
  # partition is performed on `features` and all partitionable tensors of
  # `labels`; see the partition logic below.
  # In the TPUEstimator context, the meaning of `shard` and `replica` is the
  # same; following the API, both terms are used here interchangeably.
if FLAGS.use_spatial_partition:
# Checks input_partition_dims agrees with num_cores_per_replica.
if FLAGS.num_cores_per_replica != np.prod(FLAGS.input_partition_dims):
      raise RuntimeError('--num_cores_per_replica must be a product of array '
                         'elements in --input_partition_dims.')
labels_partition_dims = {
'mean_num_positives': None,
'source_ids': None,
'groundtruth_data': None,
'image_scales': None,
'image_masks': None,
}
# The Input Partition Logic: We partition only the partition-able tensors.
feat_sizes = utils.get_feat_sizes(
config.get('image_size'), config.get('max_level'))
for level in range(config.get('min_level'), config.get('max_level') + 1):
def _can_partition(spatial_dim):
partitionable_index = np.where(
spatial_dim % np.array(FLAGS.input_partition_dims) == 0)
return len(partitionable_index[0]) == len(FLAGS.input_partition_dims)
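      # Worked example (illustrative): with the default --input_partition_dims
      # of [1, 2, 2, 1], a feature map whose height is 64 satisfies 64 % d == 0
      # for every d, so that dimension is partitionable; a height of 63 fails
      # for d == 2 and the targets for that level fall back to None below.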
spatial_dim = feat_sizes[level]
if _can_partition(spatial_dim['height']) and _can_partition(
spatial_dim['width']):
labels_partition_dims['box_targets_%d' %
level] = FLAGS.input_partition_dims
labels_partition_dims['cls_targets_%d' %
level] = FLAGS.input_partition_dims
else:
labels_partition_dims['box_targets_%d' % level] = None
labels_partition_dims['cls_targets_%d' % level] = None
num_cores_per_replica = FLAGS.num_cores_per_replica
input_partition_dims = [FLAGS.input_partition_dims, labels_partition_dims]
num_shards = FLAGS.num_cores // num_cores_per_replica
else:
num_cores_per_replica = None
input_partition_dims = None
num_shards = FLAGS.num_cores
params = dict(
config.as_dict(),
model_name=FLAGS.model_name,
iterations_per_loop=FLAGS.iterations_per_loop,
model_dir=FLAGS.model_dir,
num_shards=num_shards,
num_examples_per_epoch=FLAGS.num_examples_per_epoch,
strategy=FLAGS.strategy,
backbone_ckpt=FLAGS.backbone_ckpt,
ckpt=FLAGS.ckpt,
val_json_file=FLAGS.val_json_file,
testdev_dir=FLAGS.testdev_dir,
mode=FLAGS.mode)
config_proto = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False)
if FLAGS.strategy != 'tpu':
if FLAGS.use_xla:
config_proto.graph_options.optimizer_options.global_jit_level = (
tf.OptimizerOptions.ON_1)
config_proto.gpu_options.allow_growth = True
model_dir = FLAGS.model_dir
model_fn_instance = det_model_fn.get_model_fn(FLAGS.model_name)
max_instances_per_image = config.max_instances_per_image
if FLAGS.eval_samples:
eval_steps = int(FLAGS.eval_samples // FLAGS.eval_batch_size)
else:
eval_steps = None
total_examples = int(config.num_epochs * FLAGS.num_examples_per_epoch)
train_steps = total_examples // FLAGS.train_batch_size
logging.info(params)
if not tf.io.gfile.exists(model_dir):
tf.io.gfile.makedirs(model_dir)
config_file = os.path.join(model_dir, 'config.yaml')
if not tf.io.gfile.exists(config_file):
tf.io.gfile.GFile(config_file, 'w').write(str(config))
train_input_fn = dataloader.InputReader(
FLAGS.training_file_pattern,
is_training=True,
use_fake_data=FLAGS.use_fake_data,
max_instances_per_image=max_instances_per_image)
eval_input_fn = dataloader.InputReader(
FLAGS.validation_file_pattern,
is_training=False,
use_fake_data=FLAGS.use_fake_data,
max_instances_per_image=max_instances_per_image)
if FLAGS.strategy == 'tpu':
tpu_config = tf.estimator.tpu.TPUConfig(
FLAGS.iterations_per_loop if FLAGS.strategy == 'tpu' else 1,
num_cores_per_replica=num_cores_per_replica,
input_partition_dims=input_partition_dims,
per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
.PER_HOST_V2)
run_config = tf.estimator.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=model_dir,
log_step_count_steps=FLAGS.iterations_per_loop,
session_config=config_proto,
tpu_config=tpu_config,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tf_random_seed=FLAGS.tf_random_seed,
)
# TPUEstimator can do both train and eval.
train_est = tf.estimator.tpu.TPUEstimator(
model_fn=model_fn_instance,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=params)
eval_est = train_est
else:
strategy = None
if FLAGS.strategy == 'gpus':
strategy = tf.distribute.MirroredStrategy()
run_config = tf.estimator.RunConfig(
model_dir=model_dir,
train_distribute=strategy,
log_step_count_steps=FLAGS.iterations_per_loop,
session_config=config_proto,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tf_random_seed=FLAGS.tf_random_seed,
)
def get_estimator(global_batch_size):
params['num_shards'] = getattr(strategy, 'num_replicas_in_sync', 1)
params['batch_size'] = global_batch_size // params['num_shards']
return tf.estimator.Estimator(
model_fn=model_fn_instance, config=run_config, params=params)
# train and eval need different estimator due to different batch size.
train_est = get_estimator(FLAGS.train_batch_size)
eval_est = get_estimator(FLAGS.eval_batch_size)
# start train/eval flow.
if FLAGS.mode == 'train':
train_est.train(input_fn=train_input_fn, max_steps=train_steps)
if FLAGS.eval_after_training:
eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)
elif FLAGS.mode == 'eval':
# Run evaluation when there's a new checkpoint
for ckpt in tf.train.checkpoints_iterator(
FLAGS.model_dir,
min_interval_secs=FLAGS.min_eval_interval,
timeout=FLAGS.eval_timeout):
logging.info('Starting to evaluate.')
try:
eval_results = eval_est.evaluate(eval_input_fn, steps=eval_steps)
# Terminate eval job when final checkpoint is reached.
try:
current_step = int(os.path.basename(ckpt).split('-')[1])
except IndexError:
logging.info('%s has no global step info: stop!', ckpt)
break
utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
if current_step >= train_steps:
logging.info('Eval finished step %d/%d', current_step, train_steps)
break
except tf.errors.NotFoundError:
        # The checkpoint might have been deleted by the time eval finished.
        # We simply skip such a case.
logging.info('Checkpoint %s no longer exists, skipping.', ckpt)
elif FLAGS.mode == 'train_and_eval':
ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
try:
step = int(os.path.basename(ckpt).split('-')[1])
current_epoch = (
step * FLAGS.train_batch_size // FLAGS.num_examples_per_epoch)
logging.info('found ckpt at step %d (epoch %d)', step, current_epoch)
except (IndexError, TypeError):
logging.info('Folder %s has no ckpt with valid step.', FLAGS.model_dir)
current_epoch = 0
def run_train_and_eval(e):
print('\n =====> Starting training, epoch: %d.' % e)
train_est.train(
input_fn=train_input_fn,
max_steps=e * FLAGS.num_examples_per_epoch // FLAGS.train_batch_size)
print('\n =====> Starting evaluation, epoch: %d.' % e)
eval_results = eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)
ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
epochs_per_cycle = 1 # higher number has less graph construction overhead.
for e in range(current_epoch + 1, config.num_epochs + 1, epochs_per_cycle):
if FLAGS.run_epoch_in_child_process:
p = multiprocessing.Process(target=run_train_and_eval, args=(e,))
p.start()
p.join()
else:
run_train_and_eval(e)
else:
logging.info('Invalid mode: %s', FLAGS.mode)
if __name__ == '__main__':
app.run(main)
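# Example invocation (illustrative; paths, patterns and sizes are placeholders):
#   python main.py --mode=train_and_eval \
#     --training_file_pattern=/data/coco/train-*.tfrecord \
#     --validation_file_pattern=/data/coco/val-*.tfrecord \
#     --val_json_file=/data/coco/annotations/instances_val2017.json \
#     --model_name=efficientdet-d1 --model_dir=/tmp/efficientdet \
#     --train_batch_size=64 --eval_batch_size=1 --strategy=gpus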
|
filosofos_v1.py
|
# -*- coding: utf-8 -*-
import threading
num = 5
palillos = [threading.Semaphore(1) for i in range(num)]
def filosofo(id):
while True:
piensa(id)
levanta_palillos(id)
come(id)
suelta_palillos(id)
def piensa(id):
    # (...)
    print("%d - I'm hungry..." % id)
def levanta_palillos(id):
    # Each philosopher grabs the right chopstick first and then the left one,
    # so this version can deadlock if all of them pick up their right
    # chopstick at the same time.
    palillos[(id + 1) % num].acquire()
    print("%d - I have the right chopstick" % id)
    palillos[id].acquire()
    print("%d - I have both chopsticks" % id)
def suelta_palillos(id):
    palillos[(id + 1) % num].release()
    palillos[id].release()
    print("%d - Let's keep thinking..." % id)
def come(id):
    print("%d - Time to eat!" % id)
    # (...)
filosofos = []
for i in range(num):
fil = threading.Thread(target=filosofo, args=[i])
filosofos.append(fil)
fil.start()
|
test_miscell.py
|
from __future__ import absolute_import
# import common
import unittest
import stackless
import sys
import traceback
import weakref
import types
import contextlib
import time
import os
import struct
import gc
from stackless import _test_nostacklesscall as apply_not_stackless
import _teststackless
try:
import _thread as thread
import threading
withThreads = True
except:
withThreads = False
from support import test_main # @UnusedImport
from support import (StacklessTestCase, AsTaskletTestCase, require_one_thread,
testcase_leaks_references, is_zombie)
def is_soft():
softswitch = stackless.enable_softswitch(0)
stackless.enable_softswitch(softswitch)
return softswitch
def runtask():
x = 0
    # evoke pickling of a range object
dummy = range(10)
for ii in range(1000):
x += 1
@contextlib.contextmanager
def switch_trapped():
stackless.switch_trap(1)
try:
yield
finally:
stackless.switch_trap(-1)
class TestWatchdog(StacklessTestCase):
def lifecycle(self, t):
# Initial state - unrun
self.assertTrue(t.alive)
self.assertTrue(t.scheduled)
self.assertEqual(t.recursion_depth, 0)
# allow hard switching
t.set_ignore_nesting(1)
softSwitching = stackless.enable_softswitch(0)
stackless.enable_softswitch(softSwitching)
# Run a little
res = stackless.run(10)
self.assertEqual(t, res)
self.assertTrue(t.alive)
self.assertTrue(t.paused)
self.assertFalse(t.scheduled)
self.assertEqual(t.recursion_depth, 1)
# Push back onto queue
t.insert()
self.assertFalse(t.paused)
self.assertTrue(t.scheduled)
# Run to completion
stackless.run()
self.assertFalse(t.alive)
self.assertFalse(t.scheduled)
self.assertEqual(t.recursion_depth, 0)
def test_aliveness1(self):
""" Test flags after being run. """
t = stackless.tasklet(runtask)()
self.lifecycle(t)
@StacklessTestCase.prepare_pickle_test_method
def test_aliveness2(self):
""" Same as 1, but with a pickled unrun tasklet. """
t = stackless.tasklet(runtask)()
t_new = self.loads(self.dumps((t)))
t.remove()
t_new.insert()
self.lifecycle(t_new)
@StacklessTestCase.prepare_pickle_test_method
def test_aliveness3(self):
""" Same as 1, but with a pickled run(slightly) tasklet. """
t = stackless.tasklet(runtask)()
t.set_ignore_nesting(1)
# Initial state - unrun
self.assertTrue(t.alive)
self.assertTrue(t.scheduled)
self.assertEqual(t.recursion_depth, 0)
softSwitching = stackless.enable_softswitch(0)
stackless.enable_softswitch(softSwitching)
# Run a little
res = stackless.run(100)
self.assertEqual(t, res)
self.assertTrue(t.alive)
self.assertTrue(t.paused)
self.assertFalse(t.scheduled)
self.assertEqual(t.recursion_depth, 1)
# Now save & load
dumped = self.dumps(t)
t_new = self.loads(dumped)
# Remove and insert & swap names around a bit
t.remove()
t = t_new
del t_new
t.insert()
self.assertTrue(t.alive)
self.assertFalse(t.paused)
self.assertTrue(t.scheduled)
self.assertEqual(t.recursion_depth, 1)
# Run to completion
if is_soft():
stackless.run()
else:
t.kill()
self.assertFalse(t.alive)
self.assertFalse(t.scheduled)
self.assertEqual(t.recursion_depth, 0)
class TestTaskletSwitching(StacklessTestCase):
"""Test the tasklet's own scheduling methods"""
def test_raise_exception(self):
c = stackless.channel()
def foo():
self.assertRaises(IndexError, c.receive)
s = stackless.tasklet(foo)()
s.run() # necessary, since raise_exception won't automatically run it
s.raise_exception(IndexError)
def test_run(self):
c = stackless.channel()
flag = [False]
def foo():
flag[0] = True
s = stackless.tasklet(foo)()
s.run()
self.assertEqual(flag[0], True)
def test_switch_to_current(self):
# See https://bitbucket.org/stackless-dev/stackless/issues/88
current = stackless.current
current.switch()
current.switch() # this second switch used to trigger an assertion violation
class TestTaskletThrowBase(object):
def test_throw_noargs(self):
c = stackless.channel()
def foo():
self.assertRaises(IndexError, c.receive)
s = stackless.tasklet(foo)()
s.run() # It needs to have started to run
self.throw(s, IndexError)
self.aftercheck(s)
def test_throw_args(self):
c = stackless.channel()
def foo():
try:
c.receive()
except Exception as e:
self.assertTrue(isinstance(e, IndexError))
self.assertEqual(e.args, (1, 2, 3))
s = stackless.tasklet(foo)()
s.run() # It needs to have started to run
self.throw(s, IndexError, (1, 2, 3))
self.aftercheck(s)
def test_throw_inst(self):
c = stackless.channel()
def foo():
try:
c.receive()
except Exception as e:
self.assertTrue(isinstance(e, IndexError))
self.assertEqual(e.args, (1, 2, 3))
s = stackless.tasklet(foo)()
s.run() # It needs to have started to run
self.throw(s, IndexError(1, 2, 3))
self.aftercheck(s)
def test_throw_exc_info(self):
c = stackless.channel()
def foo():
try:
c.receive()
except Exception as e:
self.assertTrue(isinstance(e, ZeroDivisionError))
s = stackless.tasklet(foo)()
s.run() # It needs to have started to run
def errfunc():
1 / 0
try:
errfunc()
except Exception:
self.throw(s, *sys.exc_info())
self.aftercheck(s)
def test_throw_traceback(self):
c = stackless.channel()
def foo():
try:
c.receive()
except Exception:
s = "".join(traceback.format_tb(sys.exc_info()[2]))
self.assertTrue("errfunc" in s)
s = stackless.tasklet(foo)()
s.run() # It needs to have started to run
def errfunc():
1 / 0
try:
errfunc()
except Exception:
self.throw(s, *sys.exc_info())
self.aftercheck(s)
def test_new(self):
c = stackless.channel()
def foo():
try:
c.receive()
except Exception as e:
self.assertTrue(isinstance(e, IndexError))
raise
s = stackless.tasklet(foo)()
self.assertEqual(s.frame, None)
self.assertTrue(s.alive)
# Test that the current "unhandled exception behaviour"
# is invoked for the not-yet-running tasklet.
def doit():
self.throw(s, IndexError)
if not self.pending:
self.assertRaises(IndexError, doit)
else:
doit()
self.assertRaises(IndexError, stackless.run)
def test_kill_new(self):
def t():
self.assertFalse("should not run this")
s = stackless.tasklet(t)()
# Should not do anything
s.throw(TaskletExit)
# the tasklet should be dead
stackless.run()
self.assertRaisesRegex(RuntimeError, "dead", s.run)
def test_dead(self):
c = stackless.channel()
def foo():
c.receive()
s = stackless.tasklet(foo)()
s.run()
c.send(None)
stackless.run()
self.assertFalse(s.alive)
def doit():
self.throw(s, IndexError)
self.assertRaises(RuntimeError, doit)
def test_kill_dead(self):
c = stackless.channel()
def foo():
c.receive()
s = stackless.tasklet(foo)()
s.run()
c.send(None)
stackless.run()
self.assertFalse(s.alive)
def doit():
self.throw(s, TaskletExit)
# nothing should happen here.
doit()
def test_throw_invalid(self):
s = stackless.getcurrent()
def t():
self.throw(s)
self.assertRaises(TypeError, t)
def t(): # @DuplicatedSignature
self.throw(s, IndexError(1), (1, 2, 3))
self.assertRaises(TypeError, t)
class TestTaskletThrowImmediate(StacklessTestCase, TestTaskletThrowBase):
pending = False
@classmethod
def throw(cls, s, *args):
s.throw(*args, pending=cls.pending)
def aftercheck(self, s):
# the tasklet ran immediately
self.assertFalse(s.alive)
class TestTaskletThrowNonImmediate(TestTaskletThrowImmediate):
pending = True
def aftercheck(self, s):
# After the throw, the tasklet still hasn't run
self.assertTrue(s.alive)
s.run()
self.assertFalse(s.alive)
class TestSwitchTrap(StacklessTestCase):
class SwitchTrap(object):
def __enter__(self):
stackless.switch_trap(1)
def __exit__(self, exc, val, tb):
stackless.switch_trap(-1)
switch_trap = SwitchTrap()
def test_schedule(self):
s = stackless.tasklet(lambda: None)()
with self.switch_trap:
self.assertRaisesRegex(RuntimeError, "switch_trap", stackless.schedule)
stackless.run()
def test_schedule_remove(self):
main = []
s = stackless.tasklet(lambda: main[0].insert())()
with self.switch_trap:
self.assertRaisesRegex(RuntimeError, "switch_trap", stackless.schedule_remove)
main.append(stackless.getcurrent())
stackless.schedule_remove()
def test_run(self):
s = stackless.tasklet(lambda: None)()
with self.switch_trap:
self.assertRaisesRegex(RuntimeError, "switch_trap", stackless.run)
stackless.run()
def test_run_specific(self):
s = stackless.tasklet(lambda: None)()
with self.switch_trap:
self.assertRaisesRegex(RuntimeError, "switch_trap", s.run)
s.run()
def test_run_paused(self):
s = stackless.tasklet(lambda: None)
s.bind(args=())
self.assertTrue(s.paused)
with self.switch_trap:
self.assertRaisesRegex(RuntimeError, "switch_trap", s.run)
self.assertTrue(s.paused)
stackless.run()
def test_send(self):
c = stackless.channel()
s = stackless.tasklet(lambda: c.receive())()
with self.switch_trap:
self.assertRaisesRegex(RuntimeError, "switch_trap", c.send, None)
c.send(None)
def test_send_throw(self):
c = stackless.channel()
def f():
self.assertRaises(NotImplementedError, c.receive)
s = stackless.tasklet(f)()
with self.switch_trap:
self.assertRaisesRegex(RuntimeError, "switch_trap", c.send_throw, NotImplementedError)
c.send_throw(NotImplementedError)
def test_receive(self):
c = stackless.channel()
s = stackless.tasklet(lambda: c.send(1))()
with self.switch_trap:
self.assertRaisesRegex(RuntimeError, "switch_trap", c.receive)
self.assertEqual(c.receive(), 1)
def test_receive_throw(self):
c = stackless.channel()
s = stackless.tasklet(lambda: c.send_throw(NotImplementedError))()
with self.switch_trap:
self.assertRaisesRegex(RuntimeError, "switch_trap", c.receive)
self.assertRaises(NotImplementedError, c.receive)
def test_raise_exception(self):
c = stackless.channel()
def foo():
self.assertRaises(IndexError, c.receive)
s = stackless.tasklet(foo)()
s.run() # necessary, since raise_exception won't automatically run it
with self.switch_trap:
self.assertRaisesRegex(RuntimeError, "switch_trap", s.raise_exception, RuntimeError)
s.raise_exception(IndexError)
def test_kill(self):
c = stackless.channel()
def foo():
self.assertRaises(TaskletExit, c.receive)
s = stackless.tasklet(foo)()
s.run() # necessary, since raise_exception won't automatically run it
with self.switch_trap:
self.assertRaisesRegex(RuntimeError, "switch_trap", s.kill)
s.kill()
def test_run2(self):
c = stackless.channel()
def foo():
pass
s = stackless.tasklet(foo)()
with self.switch_trap:
self.assertRaisesRegex(RuntimeError, "switch_trap", s.run)
s.run()
class TestKill(StacklessTestCase):
SLP_TASKLET_KILL_REBINDS_THREAD = False # see tasklet.c function impl_tasklet_kill()
def test_kill_pending_true(self):
killed = [False]
def foo():
try:
stackless.schedule()
except TaskletExit:
killed[0] = True
raise
t = stackless.tasklet(foo)()
t.run()
self.assertFalse(killed[0])
t.kill(pending=True)
self.assertFalse(killed[0])
t.run()
self.assertTrue(killed[0])
def test_kill_pending_False(self):
killed = [False]
def foo():
try:
stackless.schedule()
except TaskletExit:
killed[0] = True
raise
t = stackless.tasklet(foo)()
t.run()
self.assertFalse(killed[0])
t.kill(pending=False)
self.assertTrue(killed[0])
def test_kill_current(self):
killed = [False]
def task():
try:
stackless.current.kill()
except TaskletExit:
killed[0] = True
raise
t = stackless.tasklet(task)()
t.run()
self.assertTrue(killed[0])
self.assertFalse(t.alive)
self.assertEqual(t.thread_id, stackless.current.thread_id)
@unittest.skipUnless(withThreads, "requires thread support")
@require_one_thread
def test_kill_thread_without_main_tasklet(self):
# this test depends on a race condition.
# unfortunately I do not have any better test case
# This lock is used as a simple event variable.
ready = thread.allocate_lock()
ready.acquire()
channel = stackless.channel()
tlet = stackless.tasklet()
self.tlet = tlet
class DelayError(Exception):
def __str__(self):
time.sleep(0.05)
return super(DelayError, self).__str__()
# catch stderr
self.addCleanup(setattr, sys, "stderr", sys.stderr)
sys.stderr = open(os.devnull, "w")
self.addCleanup(sys.stderr.close)
def other_thread_main():
tlet.bind_thread()
tlet.bind(channel.receive, ())
tlet.run()
ready.release()
raise DelayError("a slow exception")
# during the processing of this exception the
# thread has no main tasklet. Exception processing
# takes some time. During this time the main thread
# kills the tasklet
thread.start_new_thread(other_thread_main, ())
ready.acquire() # Be sure the other thread is ready.
#print("at end")
is_blocked = tlet.blocked
#tlet.bind_thread()
try:
tlet.kill(pending=True)
except RuntimeError as e:
self.assertIn("Target thread isn't initialised", str(e))
# print("got exception")
else:
# print("no exception")
pass
self.assertTrue(is_blocked)
time.sleep(0.5)
# print("unbinding done")
def _test_kill_without_thread_state(self, nl, block):
channel = stackless.channel()
loop = True
def task():
while loop:
try:
if block:
channel.receive()
else:
stackless.main.run()
except TaskletExit:
pass
def other_thread_main():
tlet.bind_thread()
tlet.run()
if nl == 0:
tlet = stackless.tasklet().bind(task, ())
else:
tlet = stackless.tasklet().bind(apply_not_stackless, (task,))
t = threading.Thread(target=other_thread_main, name="other thread")
t.start()
t.join()
time.sleep(0.05) # time for other_thread to clear its state
loop = False
if block:
self.assertTrue(tlet.blocked)
else:
self.assertFalse(tlet.blocked)
self.assertFalse(tlet.alive)
self.assertEqual(tlet.thread_id, -1)
self.assertRaisesRegex(RuntimeError, "tasklet has no thread", tlet.throw, TaskletExit, pending=True)
tlet.kill(pending=True)
self.assertFalse(tlet.blocked)
if self.SLP_TASKLET_KILL_REBINDS_THREAD and stackless.enable_softswitch(None) and nl == 0:
# rebinding and soft switching
self.assertTrue(tlet.scheduled)
self.assertTrue(tlet.alive)
tlet.remove()
tlet.bind(None)
else:
# hard switching
self.assertFalse(tlet.scheduled)
self.assertIsNone(tlet.next)
self.assertIsNone(tlet.prev)
self.assertFalse(tlet.alive)
tlet.remove()
tlet.kill()
@unittest.skipUnless(withThreads, "requires thread support")
@testcase_leaks_references("chatches TaskletExit and does not die in its own thread", soft_switching=False)
def test_kill_without_thread_state_nl0(self):
return self._test_kill_without_thread_state(0, False)
@unittest.skipUnless(withThreads, "requires thread support")
@testcase_leaks_references("chatches TaskletExit and does not die in its own thread")
def test_kill_without_thread_state_nl1(self):
return self._test_kill_without_thread_state(1, False)
@unittest.skipUnless(withThreads, "requires thread support")
@testcase_leaks_references("chatches TaskletExit and does not die in its own thread", soft_switching=False)
def test_kill_without_thread_state_blocked_nl0(self):
return self._test_kill_without_thread_state(0, True)
@unittest.skipUnless(withThreads, "requires thread support")
@testcase_leaks_references("chatches TaskletExit and does not die in its own thread")
def test_kill_without_thread_state_blocked_nl1(self):
return self._test_kill_without_thread_state(1, True)
class TestErrorHandler(StacklessTestCase):
def setUp(self):
super(TestErrorHandler, self).setUp()
self.handled = self.ran = 0
self.handled_tasklet = None
def test_set(self):
def foo():
pass
self.assertEqual(stackless.set_error_handler(foo), None)
self.assertEqual(stackless.set_error_handler(None), foo)
self.assertEqual(stackless.set_error_handler(None), None)
@contextlib.contextmanager
def handlerctxt(self, handler):
old = stackless.set_error_handler(handler)
try:
            yield
finally:
stackless.set_error_handler(old)
def handler(self, exc, val, tb):
self.assertTrue(exc)
self.handled = 1
self.handled_tasklet = stackless.getcurrent()
def borken_handler(self, exc, val, tb):
self.handled = 1
raise IndexError("we are the mods")
def get_handler(self):
h = stackless.set_error_handler(None)
stackless.set_error_handler(h)
return h
def func(self, handler):
self.ran = 1
self.assertEqual(self.get_handler(), handler)
raise ZeroDivisionError("I am borken")
def test_handler(self):
stackless.tasklet(self.func)(self.handler)
with self.handlerctxt(self.handler):
stackless.run()
self.assertTrue(self.ran)
self.assertTrue(self.handled)
def test_borken_handler(self):
stackless.tasklet(self.func)(self.borken_handler)
with self.handlerctxt(self.borken_handler):
self.assertRaisesRegex(IndexError, "mods", stackless.run)
self.assertTrue(self.ran)
self.assertTrue(self.handled)
    def test_early_throw(self):
        "Test that we handle errors thrown before the tasklet function runs"
s = stackless.tasklet(self.func)(self.handler)
with self.handlerctxt(self.handler):
s.throw(ZeroDivisionError, "thrown error")
self.assertFalse(self.ran)
self.assertTrue(self.handled)
def test_getcurrent(self):
# verify that the error handler runs in the context of the exiting tasklet
s = stackless.tasklet(self.func)(self.handler)
with self.handlerctxt(self.handler):
s.throw(ZeroDivisionError, "thrown error")
self.assertTrue(self.handled_tasklet is s)
self.handled_tasklet = None
s = stackless.tasklet(self.func)(self.handler)
with self.handlerctxt(self.handler):
s.run()
self.assertTrue(self.handled_tasklet is s)
def test_throw_pending(self):
# make sure that throwing a pending error doesn't immediately throw
def func(h):
self.ran = 1
stackless.schedule()
self.func(h)
s = stackless.tasklet(func)(self.handler)
s.run()
self.assertTrue(self.ran)
self.assertFalse(self.handled)
with self.handlerctxt(self.handler):
s.throw(ZeroDivisionError, "thrown error", pending=True)
self.assertEqual(self.handled_tasklet, None)
with self.handlerctxt(self.handler):
s.run()
self.assertEqual(self.handled_tasklet, s)
#
# Test context manager soft switching support
# See http://www.stackless.com/ticket/22
#
def _create_contextlib_test_classes():
import test.test_contextlib as module
g = globals()
for name in dir(module):
obj = getattr(module, name, None)
if not (isinstance(obj, type) and issubclass(obj, unittest.TestCase)):
continue
g[name] = type(name, (AsTaskletTestCase, obj), {})
_create_contextlib_test_classes()
class TestContextManager(StacklessTestCase):
def nestingLevel(self):
self.assertFalse(stackless.getcurrent().nesting_level)
class C(object):
def __enter__(self_): # @NoSelf
self.assertFalse(stackless.getcurrent().nesting_level)
return self_
def __exit__(self_, exc_type, exc_val, exc_tb): # @NoSelf
self.assertFalse(stackless.getcurrent().nesting_level)
return False
with C() as c:
self.assertTrue(isinstance(c, C))
def test_nestingLevel(self):
if not stackless.enable_softswitch(None):
# the test requires softswitching
return
stackless.tasklet(self.nestingLevel)()
stackless.run()
def test_tasklet_switch_in_exitfunc_after_return(self):
# A test for Stackless issue #159.
# The test must not crash python.
class ContextManager(object):
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
# soft switch to the main tasklet
stackless.schedule_remove(4711)
# return True to enable the code path used to silence an exception
return True # python used to crash here, until #159 was fixed
def task():
with ContextManager():
# you can set a gdb breakpoint in _teststackless.c test_cframe()
_teststackless.test_cframe(0, 0)
# We need a return statement, to set stack TOP to an int()
return None
self.fail("Not reached")
t = stackless.tasklet(task)()
t.run()
self.assertEqual(t.tempval, 4711) # back from context manager __exit__
t.run()
self.assertFalse(t.alive) # tasklet done
class TestAtomic(StacklessTestCase):
"""Test the getting and setting of the tasklet's 'atomic' flag, and the
context manager to set it to True
"""
def testAtomic(self):
old = stackless.getcurrent().atomic
try:
val = stackless.getcurrent().set_atomic(False)
self.assertEqual(val, old)
self.assertEqual(stackless.getcurrent().atomic, False)
val = stackless.getcurrent().set_atomic(True)
self.assertEqual(val, False)
self.assertEqual(stackless.getcurrent().atomic, True)
val = stackless.getcurrent().set_atomic(True)
self.assertEqual(val, True)
self.assertEqual(stackless.getcurrent().atomic, True)
val = stackless.getcurrent().set_atomic(False)
self.assertEqual(val, True)
self.assertEqual(stackless.getcurrent().atomic, False)
finally:
stackless.getcurrent().set_atomic(old)
self.assertEqual(stackless.getcurrent().atomic, old)
def testAtomicCtxt(self):
old = stackless.getcurrent().atomic
stackless.getcurrent().set_atomic(False)
try:
with stackless.atomic():
self.assertTrue(stackless.getcurrent().atomic)
finally:
stackless.getcurrent().set_atomic(old)
def testAtomicNopCtxt(self):
old = stackless.getcurrent().atomic
stackless.getcurrent().set_atomic(True)
try:
with stackless.atomic():
self.assertTrue(stackless.getcurrent().atomic)
finally:
stackless.getcurrent().set_atomic(old)
class TestSchedule(AsTaskletTestCase):
def setUp(self):
super(TestSchedule, self).setUp()
self.events = []
def testSchedule(self):
def foo(previous):
self.events.append("foo")
self.assertTrue(previous.scheduled)
t = stackless.tasklet(foo)(stackless.getcurrent())
self.assertTrue(t.scheduled)
stackless.schedule()
self.assertEqual(self.events, ["foo"])
def testScheduleRemoveFail(self):
def foo(previous):
self.events.append("foo")
self.assertFalse(previous.scheduled)
previous.insert()
self.assertTrue(previous.scheduled)
t = stackless.tasklet(foo)(stackless.getcurrent())
stackless.schedule_remove()
self.assertEqual(self.events, ["foo"])
class TestBind(StacklessTestCase):
def setUp(self):
super(TestBind, self).setUp()
self.finally_run_count = 0
self.args = self.kwargs = None
def task(self, with_c_state):
try:
if with_c_state:
_teststackless.test_cstate(lambda: stackless.schedule_remove(None))
else:
stackless.schedule_remove(None)
finally:
self.finally_run_count += 1
def argstest(self, *args, **kwargs):
self.args = args
self.kwargs = dict(kwargs)
def assertArgs(self, args, kwargs):
self.assertEqual(args, self.args)
self.assertEqual(kwargs, self.kwargs)
def test_bind(self):
t = stackless.tasklet()
wr = weakref.ref(t)
self.assertFalse(t.alive)
self.assertIsNone(t.frame)
self.assertEqual(t.nesting_level, 0)
t.bind(None) # must not change the tasklet
self.assertFalse(t.alive)
self.assertIsNone(t.frame)
self.assertEqual(t.nesting_level, 0)
t.bind(self.task)
t.setup(False)
stackless.run()
self.assertFalse(t.scheduled)
self.assertTrue(t.alive)
if stackless.enable_softswitch(None):
self.assertTrue(t.restorable)
self.assertIsInstance(t.frame, types.FrameType)
t.insert()
stackless.run()
# remove the tasklet. Must run the finally clause
t = None
self.assertIsNone(wr()) # tasklet has been deleted
self.assertEqual(self.finally_run_count, 1)
def test_bind_fail_not_callable(self):
class C(object):
pass
self.assertRaisesRegex(TypeError, "callable", stackless.getcurrent().bind, C())
def test_unbind_ok(self):
if not stackless.enable_softswitch(None):
# the test requires softswitching
return
t = stackless.tasklet(self.task)(False)
wr = weakref.ref(t)
# prepare a paused tasklet
stackless.run()
self.assertFalse(t.scheduled)
self.assertTrue(t.alive)
self.assertEqual(t.nesting_level, 0)
self.assertIsInstance(t.frame, types.FrameType)
t.bind(None)
self.assertFalse(t.alive)
self.assertIsNone(t.frame)
# remove the tasklet. Must not run the finally clause
t = None
self.assertIsNone(wr()) # tasklet has been deleted
self.assertEqual(self.finally_run_count, 0)
def test_unbind_fail_current(self):
self.assertRaisesRegex(RuntimeError, "current tasklet", stackless.getcurrent().bind, None)
def test_unbind_fail_scheduled(self):
t = stackless.tasklet(self.task)(False)
# prepare a paused tasklet
stackless.run()
t.insert()
self.assertTrue(t.scheduled)
self.assertTrue(t.alive)
self.assertIsInstance(t.frame, types.FrameType)
self.assertRaisesRegex(RuntimeError, "scheduled", t.bind, None)
def test_unbind_fail_cstate(self):
t = stackless.tasklet(self.task)(True)
wr = weakref.ref(t)
# prepare a paused tasklet
stackless.run()
self.assertFalse(t.scheduled)
self.assertTrue(t.alive)
self.assertGreaterEqual(t.nesting_level, 1)
self.assertIsInstance(t.frame, types.FrameType)
self.assertRaisesRegex(RuntimeError, "C state", t.bind, None)
# remove the tasklet. Must run the finally clause
t = None
self.assertIsNone(wr()) # tasklet has been deleted
self.assertEqual(self.finally_run_count, 1)
def test_bind_noargs(self):
t = stackless.tasklet(self.task)
t.bind(self.argstest)
self.assertRaises(RuntimeError, t.run)
def test_bind_args(self):
args = "foo", "bar"
t = stackless.tasklet(self.task)
t.bind(self.argstest, args)
t.run()
self.assertArgs(args, {})
t = stackless.tasklet(self.task)
t.bind(self.argstest, args=args)
t.run()
self.assertArgs(args, {})
def test_bind_kwargs(self):
t = stackless.tasklet(self.task)
kwargs = {"hello": "world"}
t.bind(self.argstest, None, kwargs)
t.run()
self.assertArgs((), kwargs)
t = stackless.tasklet(self.task)
t.bind(self.argstest, kwargs=kwargs)
t.run()
self.assertArgs((), kwargs)
def test_bind_args_kwargs(self):
args = ("foo", "bar")
kwargs = {"hello": "world"}
t = stackless.tasklet(self.task)
t.bind(self.argstest, args, kwargs)
t.run()
self.assertArgs(args, kwargs)
t = stackless.tasklet(self.task)
t.bind(self.argstest, args=args, kwargs=kwargs)
t.run()
self.assertArgs(args, kwargs)
def test_bind_args_kwargs_nofunc(self):
args = ("foo", "bar")
kwargs = {"hello": "world"}
t = stackless.tasklet(self.argstest)
t.bind(None, args, kwargs)
t.run()
self.assertArgs(args, kwargs)
t = stackless.tasklet(self.argstest)
t.bind(args=args, kwargs=kwargs)
t.run()
self.assertArgs(args, kwargs)
def test_bind_args_not_runnable(self):
args = ("foo", "bar")
kwargs = {"hello": "world"}
t = stackless.tasklet(self.task)
t.bind(self.argstest, args, kwargs)
self.assertFalse(t.scheduled)
t.run()
@unittest.skipUnless(withThreads, "requires thread support")
def test_unbind_main(self):
self.skipUnlessSoftswitching()
done = []
def other():
main = stackless.main
self.assertRaisesRegex(RuntimeError, "can't unbind the main tasklet", main.bind, None)
# the initial nesting level depends on the test runner.
# We need a main tasklet with nesting_level == 0. Therefore we
# use a thread
def other_thread():
self.assertEqual(stackless.current.nesting_level, 0)
self.assertIs(stackless.current, stackless.main)
stackless.tasklet(other)().switch()
done.append(True)
t = threading.Thread(target=other_thread, name="other thread")
t.start()
t.join()
self.assertTrue(done[0])
@unittest.skipUnless(withThreads, "requires thread support")
def test_rebind_main(self):
        # rebind the main tasklet of a thread. This is highly discouraged,
        # because it will deadlock if the thread is a non-daemon threading.Thread.
self.skipUnlessSoftswitching()
ready = thread.allocate_lock()
ready.acquire()
self.target_called = False
self.main_returned = False
def target():
self.target_called = True
ready.release()
def other_thread_main():
self.assertTrue(stackless.current.is_main)
try:
stackless.tasklet(stackless.main.bind)(target, ()).switch()
finally:
self.main_returned = True
ready.release()
thread.start_new_thread(other_thread_main, ())
ready.acquire()
self.assertTrue(self.target_called)
self.assertFalse(self.main_returned)
def test_rebind_recursion_depth(self):
self.skipUnlessSoftswitching()
self.recursion_depth_in_test = None
def tasklet_outer():
tasklet_inner()
def tasklet_inner():
stackless.main.switch()
def test():
self.recursion_depth_in_test = stackless.current.recursion_depth
tlet = stackless.tasklet(tasklet_outer)()
self.assertEqual(tlet.recursion_depth, 0)
tlet.run()
self.assertEqual(tlet.recursion_depth, 2)
tlet.bind(test, ())
self.assertEqual(tlet.recursion_depth, 0)
tlet.run()
self.assertEqual(tlet.recursion_depth, 0)
self.assertEqual(self.recursion_depth_in_test, 1)
@unittest.skipUnless(withThreads, "requires thread support")
@testcase_leaks_references("Tasklet chatches TaskletExit and refuses to die in its thread")
def test_unbind_fail_cstate_no_thread(self):
# https://bitbucket.org/stackless-dev/stackless/issues/92
loop = True
def task():
while loop:
try:
stackless.main.switch()
except TaskletExit:
pass
def other_thread_main():
tlet.bind_thread()
tlet.run()
tlet = stackless.tasklet().bind(apply_not_stackless, (task,))
t = threading.Thread(target=other_thread_main, name="other thread")
t.start()
t.join()
time.sleep(0.05) # other_thread needs some time to be destroyed
loop = False
self.assertEqual(tlet.thread_id, -1)
self.assertFalse(tlet.alive)
self.assertFalse(tlet.restorable)
self.assertGreater(tlet.nesting_level, 0)
self.assertRaisesRegex(RuntimeError, "tasklet has C state on its stack", tlet.bind, None)
self.tasklet_is_uncollectable(tlet) # mark this tasklet as uncollectable
def test_setup_fail_alive(self):
# make sure, that you can't bind a tasklet, which is alive
# https://bitbucket.org/stackless-dev/stackless/issues/106
def task():
t = stackless.current
t.tempval = lambda: None
self.assertTrue(t.alive)
self.assertRaisesRegex(RuntimeError, "tasklet is alive", t.setup)
t = stackless.tasklet(task, ())
t.run()
self.assertFalse(t.alive)
class TestSwitch(StacklessTestCase):
"""Test the new tasklet.switch() method, which allows
explicit switching
"""
def setUp(self):
super(TestSwitch, self).setUp()
self.source = stackless.getcurrent()
self.finished = False
self.c = stackless.channel()
def target(self):
self.assertTrue(self.source.paused)
self.source.insert()
self.finished = True
def blocked_target(self):
self.c.receive()
self.finished = True
def test_switch(self):
"""Simple switch"""
t = stackless.tasklet(self.target)()
t.switch()
self.assertTrue(self.finished)
def test_switch_self(self):
t = stackless.getcurrent()
t.switch()
def test_switch_blocked(self):
t = stackless.tasklet(self.blocked_target)()
t.run()
self.assertTrue(t.blocked)
self.assertRaisesRegex(RuntimeError, "blocked", t.switch)
self.c.send(None)
self.assertTrue(self.finished)
def test_switch_paused(self):
t = stackless.tasklet(self.target)
t.bind(args=())
self.assertTrue(t.paused)
t.switch()
self.assertTrue(self.finished)
def test_switch_trapped(self):
t = stackless.tasklet(self.target)()
self.assertFalse(t.paused)
with switch_trapped():
self.assertRaisesRegex(RuntimeError, "switch_trap", t.switch)
self.assertFalse(t.paused)
t.switch()
self.assertTrue(self.finished)
def test_switch_self_trapped(self):
t = stackless.getcurrent()
with switch_trapped():
t.switch() # ok, switching to ourselves!
def test_switch_blocked_trapped(self):
t = stackless.tasklet(self.blocked_target)()
t.run()
self.assertTrue(t.blocked)
with switch_trapped():
self.assertRaisesRegex(RuntimeError, "blocked", t.switch)
self.assertTrue(t.blocked)
self.c.send(None)
self.assertTrue(self.finished)
def test_switch_paused_trapped(self):
t = stackless.tasklet(self.target)
t.bind(args=())
self.assertTrue(t.paused)
with switch_trapped():
self.assertRaisesRegex(RuntimeError, "switch_trap", t.switch)
self.assertTrue(t.paused)
t.switch()
self.assertTrue(self.finished)
class TestModule(StacklessTestCase):
def test_get_debug(self):
self.assertIn(stackless.getdebug(), [True, False])
def test_debug(self):
self.assertIn(stackless.debug, [True, False])
def test_get_uncollectables(self):
self.assertEqual(type(stackless.getuncollectables()), list)
def test_uncollectables(self):
self.assertEqual(type(stackless.uncollectables), list)
def test_get_threads(self):
self.assertEqual(type(stackless.getthreads()), list)
def test_threads(self):
self.assertEqual(type(stackless.threads), list)
class TestCstate(StacklessTestCase):
def test_cstate(self):
self.assertIsInstance(stackless.main.cstate, stackless.cstack)
def test_str_size(self):
c = stackless.main.cstate
s = str(c)
self.assertEqual(len(s), c.size * struct.calcsize("P"))
def test_nesting_level(self):
c = stackless.main.cstate
l1 = c.nesting_level
self.assertIsInstance(l1, int)
def test_chain(self):
# create at least one additional C-stack
t = stackless.tasklet(apply_not_stackless)(stackless.main.switch,)
t.run()
self.addCleanup(t.run)
start = stackless.main.cstate
c = start.next
self.assertIsNot(c, start)
        while c is not start:
self.assertIsInstance(c, stackless.cstack)
self.assertIs(c.prev.next, c)
c = c.next
class TestTaskletFinalizer(StacklessTestCase):
def test_zombie(self):
loop = True
def task():
while loop:
try:
stackless.schedule_remove()
except TaskletExit:
pass
t = stackless.tasklet(apply_not_stackless)(task,)
t.run()
self.assertTrue(t.paused)
t.__del__()
self.assertTrue(is_zombie(t))
self.assertIn(t, gc.garbage)
gc.garbage.remove(t)
# clean up
loop = False
t.kill()
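# Hedged sketch (not part of the original test suite): a minimal illustration of the
# tasklet/channel round trip that many of the tests above exercise, assuming the
# `stackless` module imported at the top of this file. The helper name is made up.
def _example_tasklet_channel_roundtrip():
    c = stackless.channel()
    def worker():
        c.send("pong")  # blocks until a receiver is ready, then switches back
    stackless.tasklet(worker)()  # create and schedule the worker tasklet
    return c.receive()  # switches to the worker and returns "pong"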
#///////////////////////////////////////////////////////////////////////////////
if __name__ == '__main__':
if not sys.argv[1:]:
sys.argv.append('-v')
unittest.main()
|
backends.py
|
import os
import random
import time
import tempfile
import threading
from django.core.cache.backends.filebased import pickle, FileBasedCache as DjangoFileBasedCache
class FileBasedCache(DjangoFileBasedCache):
"""Faile based backend with some improvements."""
_fs_transaction_suffix = '.__dj_cache'
def set(self, key, value, timeout=None, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
fname = self._key_to_file(key)
dirname = os.path.dirname(fname)
if timeout is None:
timeout = self.default_timeout
if random.random() > 0.8:
threading.Thread(target=self._cull).start()
try:
if not os.path.exists(dirname):
os.makedirs(dirname)
fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix,
dir=dirname)
with os.fdopen(fd, 'wb') as f:
now = time.time()
pickle.dump(now + timeout, f, pickle.HIGHEST_PROTOCOL)
pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
os.rename(tmp, fname)
except (IOError, OSError):
pass
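# Hedged usage sketch (assumption, not part of the original module): this backend is
# selected through Django's CACHES setting; the dotted path below is hypothetical and
# depends on where this file lives in the project.
#
# CACHES = {
#     "default": {
#         "BACKEND": "myproject.backends.FileBasedCache",
#         "LOCATION": "/var/tmp/django_cache",
#     }
# }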
|
node.py
|
import logging
import threading
import time
from enum import Enum
from typing import Any, Callable, Dict, List, Tuple, Optional, Set
from utils import node_want_to_terminate
from .message import NormalMessage
class WsnNode(object):
"""无线传感网络中的一个节点
"""
    # Logger configuration
logger: logging.Logger = logging.getLogger('wsn.node')
class EnumNodeStatus(Enum):
STOPPED = 0
RUNNING = 1
    # Node id
node_id: int
    # Node coordinates
x: float
y: float
    # Communication parameters
r: float
power: float
total_power: float
pc_per_send: float
    # Node worker thread
thread: Optional[threading.Thread]
    # Control flag for the node thread ('start' / 'stop')
thread_cnt: str
    # Message sending/receiving state
recv_queue: List[NormalMessage]
send_queue: List[str or NormalMessage]
reply_queue: Dict[str, NormalMessage]
recv_count: int
replied_nodes: Set[int or str]
sending: Optional[NormalMessage]
teammate_num: int
route_len: Dict[str, Dict[str, int]]
partners: List[str]
action: Callable[..., Any]
replied_messages: Set[str]
    # Whether to run in multithreading mode
multithreading: bool = True
def __init__(
self,
node_id: int, x: float, y: float, r: float,
total_power: float, pc_per_send: float, medium
) -> None:
self.node_id = node_id
self.x = x
self.y = y
self.r = r
self.power = total_power
self.total_power = total_power
self.pc_per_send = pc_per_send
self.thread = None
self.thread_cnt = 'stop'
self.recv_queue = []
self.send_queue = []
self.reply_queue = dict()
self.recv_count = 0
self.replied_nodes = set()
self.sending = None
self.medium = medium
self.action = self.action2
self.route_len = {}
self.teammate_num = 0
self.replied_messages = set()
def start(self) -> bool:
"""启动节点
:return: 只要方法执行完节点是处于运行状态,就返回 True 否则返回 False
"""
if self.thread is not None and self.thread.is_alive():
return True
self.recv_queue = []
self.recv_count = 0
self.thread_cnt = 'start'
self.thread = threading.Thread(target=self.thread_main, name=f'node-{self.node_id}')
self.thread.start()
return self.thread.is_alive()
def stop(self, timeout: int = -1) -> bool:
"""停止节点
:param timeout: 等待线程结束的超时时间(秒),如果 < 0 则不等待(函数一定返回 True ),如果 0 则表示无限长的超时时间
:return: 只要方法执行完节点是处于停止状态,就返回 True 否则返回 False
"""
if self.thread is None or not self.thread.is_alive():
            self.logger.warning(f'node-{self.node_id} is already stopped, nothing to do')
return True
        # Set the control flag to tell the thread it should stop
self.thread_cnt = 'stop'
if timeout < 0:
            self.logger.info(f'node-{self.node_id} asked to stop; not waiting for it to finish')
return True
        # Wait for the thread to finish, up to `timeout` seconds (0 means no time limit)
        self.logger.info(f'node-{self.node_id} waiting for the thread to finish, timeout {timeout} s')
        self.thread.join(timeout if timeout > 0 else None)
        if self.thread.is_alive():
            self.logger.error(f'node-{self.node_id} timed out waiting for the thread to finish')
        else:
            self.thread = None
        # Check the result of the stop request
        if self.thread is None or not self.thread.is_alive():
            self.logger.info(f'node-{self.node_id} stopped')
            return True
        else:
            self.logger.warning(f'node-{self.node_id} failed to stop')
            return False
def echo(self) -> None:
        self.logger.info('I am still alive!')
def send(self, message: NormalMessage):
node_tag = ("node-" + str(self.node_id) + ": ") if not self.multithreading else ""
if self.power - self.pc_per_send >= 0:
self.power -= self.pc_per_send
self.medium.spread(self, message)
            self.logger.info(f'{node_tag}sent message "{message.data}"')
else:
self.stop()
            self.logger.warning(f'{node_tag}not enough power, send failed, node shut down')
def thread_main(self) -> None:
        self.logger.info('node started')
while True:
if self.thread_cnt == 'stop':
                self.logger.info('node stopped')
break
self.action()
time.sleep(5)
def action0(self):
"""无限复读广播
"""
node_tag = ("node-" + str(self.node_id) + ": ") if not self.multithreading else ""
        # If the send queue has messages and nothing is currently being sent, pop one message to send
if self.send_queue and self.sending is None:
message = self.send_queue.pop(0)
if isinstance(message, str):
self.sending = NormalMessage(data=message, source=self.node_id)
elif isinstance(message, NormalMessage):
self.sending = message
        # If there is a message currently being sent, send it
if self.sending is not None:
self.send(NormalMessage(uuid=self.sending.uuid, data=self.sending.data, source=self.node_id))
        # Handle received messages
while self.recv_queue:
message = self.recv_queue.pop(0)
            if self.sending is not None and message.uuid == self.sending.uuid:
continue
self.recv_count += 1
            self.logger.info(f'{node_tag}received message "{message.data}"')
self.sending = message
def action1(self) -> Optional[bool]:
"""要求回应
"""
node_tag = ("node-" + str(self.node_id) + ": ") if not self.multithreading else ""
        # If a message has been acknowledged by all teammates, it is done sending
if self.sending is not None and len(self.replied_nodes) >= self.teammate_num:
self.sending = None
self.replied_nodes = set()
            # Wake up the main thread
if self.multithreading:
                self.logger.info('waking up the main thread')
node_want_to_terminate.set()
else:
return True
        # If the send queue has messages and nothing is currently being sent, pop one message to send
if self.send_queue and self.sending is None:
message = self.send_queue.pop(0)
if isinstance(message, str):
self.sending = NormalMessage(data=message, source=self.node_id)
elif isinstance(message, NormalMessage):
self.sending = message
        # If there is a message currently being sent, send it
if self.sending is not None:
self.send(self.sending)
        # Handle received messages
recv_set = set()
while self.recv_queue:
message = self.recv_queue.pop(0)
            # Drop messages that we sent ourselves or have already handled
if self.node_id in message.handlers:
if self.sending is not None and message.uuid == self.sending.uuid and message.is_reply:
self.replied_nodes.add(message.handlers[0])
continue
if f'{message.uuid}-{message.handlers[0]}-{message.handlers[-1]}' not in recv_set:
recv_set.add(f'{message.uuid}-{message.handlers[0]}-{message.handlers[-1]}')
if not message.is_reply:
self.recv_count += 1
                    self.logger.info(f'{node_tag}received message "{message.data}"')
                # Register our own id on the message and forward it
message.register(self.node_id)
self.send(message)
                # If the message is not a reply, also send a reply to it
if not message.is_reply:
self.send(NormalMessage(uuid=message.uuid, is_reply=True, data=message.data, source=self.node_id))
def action2(self) -> Optional[bool]:
"""要求回应,最常用路径,原路回应
"""
node_tag = ("node-" + str(self.node_id) + ": ") if not self.multithreading else ""
        # If a message has been acknowledged by all teammates, it is done sending
if self.sending is not None and not self.sending.is_reply and len(self.replied_nodes) >= self.teammate_num:
self.sending = None
self.replied_nodes = set()
            # Wake up the main thread
if self.multithreading:
                self.logger.info('waking up the main thread')
node_want_to_terminate.set()
else:
return True
        # If the send queue has messages and nothing is currently being sent, pop one message to send
if self.send_queue and self.sending is None:
message = self.send_queue.pop(0)
if isinstance(message, str):
self.sending = NormalMessage(data=message, source=self.node_id)
elif isinstance(message, NormalMessage):
self.sending = message
        # If there is a message currently being sent, send it
if self.sending is not None:
self.send(self.sending)
for i, reply in self.reply_queue.items():
for _ in range(1):
self.send(reply)
        # Handle received messages
while self.recv_queue:
message = self.recv_queue.pop(0)
if message.is_reply:
                self.logger.info(f'{node_tag}received message "{message.data}" {message.handlers}')
if self.reply_queue.get(f'{message.uuid}-{message.handlers[0]}') is not None and \
len(
self.reply_queue.get(f'{message.uuid}-{message.handlers[0]}').handlers
) > len(message.handlers):
self.reply_queue.pop(f'{message.uuid}-{message.handlers[0]}')
continue
if len(message.handlers) < 2:
continue
if self.node_id != message.handlers[1]:
continue
message.handlers.pop(1)
self.send(message)
if self.sending is not None and not self.sending.is_reply and message.uuid == self.sending.uuid:
self.replied_nodes.add(message.handlers[0])
continue
if f'{message.uuid}-{message.handlers[0]}' not in self.replied_messages:
self.replied_messages.add(f'{message.uuid}-{message.handlers[0]}')
self.reply_queue[f'{message.uuid}-{message.handlers[0]}'] = message
else:
                # Drop messages that we sent ourselves or have already handled
if self.node_id in message.handlers:
continue
self.recv_count += 1
                self.logger.info(f'{node_tag}received message "{message.data}" {message.handlers}')
if str(message.handlers[0]) not in self.route_len.keys():
self.route_len[str(message.handlers[0])] = {}
start_point_route = self.route_len[str(message.handlers[0])]
if str(message.handlers[-1]) not in start_point_route.keys():
start_point_route[str(message.handlers[-1])] = 0
start_point_route[str(message.handlers[-1])] += 1
                # The message arrived via the most common path
if start_point_route[str(message.handlers[-1])] == max(*list(start_point_route.values()) + [0]):
                    # Register our own id on the message and forward it
message.register(self.node_id)
self.send(message)
                # Reply to messages we have not replied to yet
if message.uuid not in self.replied_messages:
message.is_reply = True
message.handlers = message.handlers[::-1]
self.replied_messages.add(message.uuid)
self.reply_queue[f'{message.uuid}-{message.handlers[0]}'] = message
def action3(self):
"""节点一次活动(方案二)
在多线程模式时,该函数每隔一段休眠时间运行一次
在单线程模式,由调度器调度运行
"""
node_tag = ("node-" + str(self.node_id) + ": ") if not self.multithreading else ""
        # If the send queue has messages and nothing is currently being sent, pop one message to send
if self.send_queue and self.sending is None:
message = self.send_queue.pop(0)
if isinstance(message, str):
self.sending = NormalMessage(data=message, source=self.node_id)
self.replied_nodes.add(self.node_id)
elif isinstance(message, NormalMessage):
self.sending = message
        # If there is a message currently being sent, send it
if self.sending is not None:
for _ in range(100):
self.send(self.sending)
self.sending = None
        # Handle received messages
while self.recv_queue:
message = self.recv_queue.pop(0)
            # Drop messages that we sent ourselves or have already handled
if message.uuid in self.replied_nodes:
continue
self.recv_count += 1
self.replied_nodes.add(message.uuid)
            self.logger.info(f'{node_tag}received message "{message.data}"')
self.send_queue.append(message)
@property
def xy(self) -> Tuple[float, float]:
return self.x, self.y
@property
def is_alive(self):
return not self.multithreading or (self.thread is not None and self.thread.is_alive())
class WsnNodeManager(object):
"""无线传感网络的节点管理器
为无线传感网络管理节点的生成和销毁
"""
    # Logger configuration
    logger: logging.Logger = logging.getLogger('wsn.nm')
nodes: List[WsnNode]
# wsn: Wsn
def __init__(self, wsn) -> None:
self.nodes = []
self.wsn = wsn
def add_node(self, x: float, y: float, r: float, power: float, pc_per_send: float) -> WsnNode:
new_node_id = self.nodes[-1].node_id + 1 if len(self.nodes) > 0 else 1
new_node = WsnNode(new_node_id, x, y, r, power, pc_per_send, self.wsn.medium)
self.nodes.append(new_node)
        self.logger.info(f'added node node-{new_node_id} ({x}, {y}), r={r}, power={power}, pc_per_send={pc_per_send}')
return new_node
def pop_node(self, node_id: int) -> Optional[WsnNode]:
try:
return self.nodes.pop(self.get_nodes_id().index(node_id))
except ValueError:
return None
def get_nodes_id(self) -> List[int]:
return [node.node_id for node in self.nodes]
def get_nodes_xy(self, nodes_id: Optional[List[int]] = None) -> List[Tuple[float, float]]:
nodes_xy = []
for node in self.nodes if nodes_id is None else [node for node in self.nodes if node.node_id in nodes_id]:
nodes_xy.append(node.xy)
return nodes_xy
@property
def node_num(self) -> int:
return len(self.nodes)
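# Hedged usage sketch (not from the original module): how the manager might be driven,
# assuming a `wsn` object exposing the `medium` used by WsnNode.send(). The helper name
# and the parameter values below are made up for illustration.
def _example_build_network(wsn) -> List[int]:
    manager = WsnNodeManager(wsn)
    node = manager.add_node(x=0.0, y=0.0, r=10.0, power=100.0, pc_per_send=1.0)
    node.send_queue.append("hello")  # queue a broadcast message
    node.start()                     # spawn the node thread (multithreading mode)
    return manager.get_nodes_id()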
|
dask.py
|
# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module
# pylint: disable=missing-class-docstring, invalid-name
# pylint: disable=too-many-lines, fixme
# pylint: disable=too-few-public-methods
# pylint: disable=import-error
"""
Dask extensions for distributed training
----------------------------------------
See :doc:`Distributed XGBoost with Dask </tutorials/dask>` for simple tutorial. Also
:doc:`/python/dask-examples/index` for some examples.
There are two sets of APIs in this module: one is the functional API, including the
``train`` and ``predict`` methods; the other is the stateful Scikit-Learn wrapper
inherited from the single-node Scikit-Learn interface.
The implementation is heavily influenced by dask_xgboost:
https://github.com/dask/dask-xgboost
Optional dask configuration
===========================
- **xgboost.scheduler_address**: Specify the scheduler address, see :ref:`tracker-ip`.
.. versionadded:: 1.6.0
.. code-block:: python
dask.config.set({"xgboost.scheduler_address": "192.0.0.100"})
# We can also specify the port.
dask.config.set({"xgboost.scheduler_address": "192.0.0.100:12345"})
"""
import platform
import logging
import collections
import socket
from contextlib import contextmanager
from collections import defaultdict
from threading import Thread
from functools import partial, update_wrapper
from typing import TYPE_CHECKING, List, Tuple, Callable, Optional, Any, Union, Dict, Set
from typing import Sequence
from typing import Awaitable, Generator, TypeVar
import numpy
from . import rabit, config
from .callback import TrainingCallback
from .compat import LazyLoader
from .compat import scipy_sparse
from .compat import PANDAS_INSTALLED, DataFrame, Series, pandas_concat
from .compat import lazy_isinstance
from ._typing import FeatureNames, FeatureTypes
from .core import DMatrix, DeviceQuantileDMatrix, Booster, _expect, DataIter
from .core import Objective, Metric
from .core import _deprecate_positional_args, _has_categorical
from .training import train as worker_train
from .tracker import RabitTracker, get_host_ip
from .sklearn import XGBModel, XGBClassifier, XGBRegressorBase, XGBClassifierBase
from .sklearn import _wrap_evaluation_matrices, _objective_decorator, _check_rf_callback
from .sklearn import XGBRankerMixIn
from .sklearn import xgboost_model_doc
from .sklearn import _cls_predict_proba
from .sklearn import XGBRanker
if TYPE_CHECKING:
from dask import dataframe as dd
from dask import array as da
from dask import delayed as ddelayed
import dask
import distributed
else:
dd = LazyLoader("dd", globals(), "dask.dataframe")
da = LazyLoader("da", globals(), "dask.array")
ddelayed = LazyLoader("Delayed", globals(), "dask.delayed")
dask = LazyLoader("dask", globals(), "dask")
distributed = LazyLoader("distributed", globals(), "dask.distributed")
_DaskCollection = Union["da.Array", "dd.DataFrame", "dd.Series"]
try:
from mypy_extensions import TypedDict
TrainReturnT = TypedDict(
"TrainReturnT",
{
"booster": Booster,
"history": Dict,
},
)
except ImportError:
TrainReturnT = Dict[str, Any] # type:ignore
__all__ = [
"RabitContext",
"DaskDMatrix",
"DaskDeviceQuantileDMatrix",
"DaskXGBRegressor",
"DaskXGBClassifier",
"DaskXGBRanker",
"DaskXGBRFRegressor",
"DaskXGBRFClassifier",
"train",
"predict",
"inplace_predict",
]
# TODOs:
# - CV
#
# Note for developers:
#
# As of writing asyncio is still a new feature of Python and in-depth documentation is
# rare. Best examples of various asyncio tricks are in dask (luckily). Classes like
# Client, Worker are awaitable. Some general rules for the implementation here:
#
# - The synchronous world is different from the asynchronous one, and they don't mix well.
# - Write everything with async, then use distributed Client sync function to do the
# switch.
# - Use Any for the type hint when the return value can be a union of Awaitable and a plain
#   value. This is because Client.sync can return both types depending on the context.
#   Right now there's no good way to silence:
#
# await train(...)
#
# if train returns an Union type.
LOGGER = logging.getLogger("[xgboost.dask]")
def _multi_lock() -> Any:
"""MultiLock is only available on latest distributed. See:
https://github.com/dask/distributed/pull/4503
"""
try:
from distributed import MultiLock
except ImportError:
class MultiLock: # type:ignore
def __init__(self, *args: Any, **kwargs: Any) -> None:
pass
def __enter__(self) -> "MultiLock":
return self
def __exit__(self, *args: Any, **kwargs: Any) -> None:
return
async def __aenter__(self) -> "MultiLock":
return self
async def __aexit__(self, *args: Any, **kwargs: Any) -> None:
return
return MultiLock
def _try_start_tracker(
n_workers: int,
addrs: List[Union[Optional[str], Optional[Tuple[str, int]]]],
) -> Dict[str, Union[int, str]]:
env: Dict[str, Union[int, str]] = {"DMLC_NUM_WORKER": n_workers}
try:
if isinstance(addrs[0], tuple):
host_ip = addrs[0][0]
port = addrs[0][1]
rabit_context = RabitTracker(
host_ip=get_host_ip(host_ip),
n_workers=n_workers,
port=port,
use_logger=False,
)
else:
assert isinstance(addrs[0], str) or addrs[0] is None
rabit_context = RabitTracker(
host_ip=get_host_ip(addrs[0]), n_workers=n_workers, use_logger=False
)
env.update(rabit_context.worker_envs())
rabit_context.start(n_workers)
thread = Thread(target=rabit_context.join)
thread.daemon = True
thread.start()
except socket.error as e:
if len(addrs) < 2 or e.errno != 99:
raise
LOGGER.warning(
"Failed to bind address '%s', trying to use '%s' instead.",
str(addrs[0]),
str(addrs[1]),
)
env = _try_start_tracker(n_workers, addrs[1:])
return env
def _start_tracker(
n_workers: int,
addr_from_dask: Optional[str],
addr_from_user: Optional[Tuple[str, int]],
) -> Dict[str, Union[int, str]]:
"""Start Rabit tracker, recurse to try different addresses."""
env = _try_start_tracker(n_workers, [addr_from_user, addr_from_dask])
return env
def _assert_dask_support() -> None:
try:
import dask # pylint: disable=W0621,W0611
except ImportError as e:
raise ImportError(
"Dask needs to be installed in order to use this module"
) from e
if platform.system() == "Windows":
msg = "Windows is not officially supported for dask/xgboost,"
msg += " contribution are welcomed."
LOGGER.warning(msg)
class RabitContext:
"""A context controlling rabit initialization and finalization."""
def __init__(self, args: List[bytes]) -> None:
self.args = args
worker = distributed.get_worker()
self.args.append(
("DMLC_TASK_ID=[xgboost.dask]:" + str(worker.address)).encode()
)
def __enter__(self) -> None:
rabit.init(self.args)
assert rabit.is_distributed()
LOGGER.debug("-------------- rabit say hello ------------------")
def __exit__(self, *args: List) -> None:
rabit.finalize()
LOGGER.debug("--------------- rabit say bye ------------------")
def concat(value: Any) -> Any: # pylint: disable=too-many-return-statements
"""To be replaced with dask builtin."""
if isinstance(value[0], numpy.ndarray):
return numpy.concatenate(value, axis=0)
if scipy_sparse and isinstance(value[0], scipy_sparse.csr_matrix):
return scipy_sparse.vstack(value, format="csr")
if scipy_sparse and isinstance(value[0], scipy_sparse.csc_matrix):
return scipy_sparse.vstack(value, format="csc")
if scipy_sparse and isinstance(value[0], scipy_sparse.spmatrix):
# other sparse format will be converted to CSR.
return scipy_sparse.vstack(value, format="csr")
if PANDAS_INSTALLED and isinstance(value[0], (DataFrame, Series)):
return pandas_concat(value, axis=0)
if lazy_isinstance(value[0], "cudf.core.dataframe", "DataFrame") or lazy_isinstance(
value[0], "cudf.core.series", "Series"
):
from cudf import concat as CUDF_concat # pylint: disable=import-error
return CUDF_concat(value, axis=0)
if lazy_isinstance(value[0], "cupy._core.core", "ndarray"):
import cupy
# pylint: disable=c-extension-no-member,no-member
d = cupy.cuda.runtime.getDevice()
for v in value:
d_v = v.device.id
assert d_v == d, "Concatenating arrays on different devices."
return cupy.concatenate(value, axis=0)
return dd.multi.concat(list(value), axis=0)
def _xgb_get_client(client: Optional["distributed.Client"]) -> "distributed.Client":
"""Simple wrapper around testing None."""
if not isinstance(client, (type(distributed.get_client()), type(None))):
raise TypeError(
_expect([type(distributed.get_client()), type(None)], type(client))
)
ret = distributed.get_client() if client is None else client
return ret
# From the implementation point of view, DaskDMatrix complicates a lot of
# things. A large portion of the code base is about syncing and extracting
# stuff from DaskDMatrix. But having an independent data structure gives us a
# chance to perform some specialized optimizations, like building histogram
# index directly.
class DaskDMatrix:
# pylint: disable=missing-docstring, too-many-instance-attributes
"""DMatrix holding on references to Dask DataFrame or Dask Array. Constructing a
`DaskDMatrix` forces all lazy computation to be carried out. Wait for the input data
explicitly if you want to see actual computation of constructing `DaskDMatrix`.
See doc for :py:obj:`xgboost.DMatrix` constructor for other parameters. DaskDMatrix
accepts only dask collection.
.. note::
DaskDMatrix does not repartition or move data between workers. It's
the caller's responsibility to balance the data.
.. versionadded:: 1.0.0
Parameters
----------
client :
Specify the dask client used for training. Use default client returned from dask
if it's set to None.
"""
@_deprecate_positional_args
def __init__(
self,
client: "distributed.Client",
data: _DaskCollection,
label: Optional[_DaskCollection] = None,
*,
weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
missing: float = None,
silent: bool = False, # pylint: disable=unused-argument
feature_names: FeatureNames = None,
feature_types: FeatureTypes = None,
group: Optional[_DaskCollection] = None,
qid: Optional[_DaskCollection] = None,
label_lower_bound: Optional[_DaskCollection] = None,
label_upper_bound: Optional[_DaskCollection] = None,
feature_weights: Optional[_DaskCollection] = None,
enable_categorical: bool = False,
) -> None:
_assert_dask_support()
client = _xgb_get_client(client)
self.feature_names = feature_names
self.feature_types = feature_types
self.missing = missing
self.enable_categorical = enable_categorical
if qid is not None and weight is not None:
raise NotImplementedError("per-group weight is not implemented.")
if group is not None:
raise NotImplementedError(
"group structure is not implemented, use qid instead."
)
if len(data.shape) != 2:
raise ValueError(f"Expecting 2 dimensional input, got: {data.shape}")
if not isinstance(data, (dd.DataFrame, da.Array)):
raise TypeError(_expect((dd.DataFrame, da.Array), type(data)))
if not isinstance(label, (dd.DataFrame, da.Array, dd.Series, type(None))):
raise TypeError(_expect((dd.DataFrame, da.Array, dd.Series), type(label)))
self._n_cols = data.shape[1]
assert isinstance(self._n_cols, int)
self.worker_map: Dict[str, "distributed.Future"] = defaultdict(list)
self.is_quantile: bool = False
self._init = client.sync(
self._map_local_data,
client,
data,
label=label,
weights=weight,
base_margin=base_margin,
qid=qid,
feature_weights=feature_weights,
label_lower_bound=label_lower_bound,
label_upper_bound=label_upper_bound,
)
def __await__(self) -> Generator:
return self._init.__await__()
async def _map_local_data(
self,
client: "distributed.Client",
data: _DaskCollection,
label: Optional[_DaskCollection] = None,
weights: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
qid: Optional[_DaskCollection] = None,
feature_weights: Optional[_DaskCollection] = None,
label_lower_bound: Optional[_DaskCollection] = None,
label_upper_bound: Optional[_DaskCollection] = None,
) -> "DaskDMatrix":
"""Obtain references to local data."""
def inconsistent(
left: List[Any], left_name: str, right: List[Any], right_name: str
) -> str:
msg = (
f"Partitions between {left_name} and {right_name} are not "
f"consistent: {len(left)} != {len(right)}. "
f"Please try to repartition/rechunk your data."
)
return msg
def check_columns(parts: numpy.ndarray) -> None:
# x is required to be 2 dim in __init__
assert parts.ndim == 1 or parts.shape[1], (
"Data should be"
" partitioned by row. To avoid this specify the number"
" of columns for your dask Array explicitly. e.g."
" chunks=(partition_size, X.shape[1])"
)
def to_delayed(d: _DaskCollection) -> List[ddelayed.Delayed]:
"""Breaking data into partitions, a trick borrowed from dask_xgboost. `to_delayed`
downgrades high-level objects into numpy or pandas equivalents .
"""
d = client.persist(d)
delayed_obj = d.to_delayed()
if isinstance(delayed_obj, numpy.ndarray):
                # da.Array returns an array of delayed objects
check_columns(delayed_obj)
delayed_list: List[ddelayed.Delayed] = delayed_obj.flatten().tolist()
else:
# dd.DataFrame
delayed_list = delayed_obj
return delayed_list
OpDelayed = TypeVar("OpDelayed", _DaskCollection, None)
def flatten_meta(meta: OpDelayed) -> OpDelayed:
if meta is not None:
meta_parts: List[ddelayed.Delayed] = to_delayed(meta)
return meta_parts
return None
X_parts = to_delayed(data)
y_parts = flatten_meta(label)
w_parts = flatten_meta(weights)
margin_parts = flatten_meta(base_margin)
qid_parts = flatten_meta(qid)
ll_parts = flatten_meta(label_lower_bound)
lu_parts = flatten_meta(label_upper_bound)
parts: Dict[str, List[ddelayed.Delayed]] = {"data": X_parts}
def append_meta(m_parts: Optional[List[ddelayed.Delayed]], name: str) -> None:
if m_parts is not None:
assert len(X_parts) == len(m_parts), inconsistent(
X_parts, "X", m_parts, name
)
parts[name] = m_parts
append_meta(y_parts, "label")
append_meta(w_parts, "weight")
append_meta(margin_parts, "base_margin")
append_meta(qid_parts, "qid")
append_meta(ll_parts, "label_lower_bound")
append_meta(lu_parts, "label_upper_bound")
# At this point, `parts` looks like:
# [(x0, x1, ..), (y0, y1, ..), ..] in delayed form
# turn into list of dictionaries.
packed_parts: List[Dict[str, ddelayed.Delayed]] = []
for i in range(len(X_parts)):
part_dict: Dict[str, ddelayed.Delayed] = {}
for key, value in parts.items():
part_dict[key] = value[i]
packed_parts.append(part_dict)
# delay the zipped result
# pylint: disable=no-member
delayed_parts: List[ddelayed.Delayed] = list(map(dask.delayed, packed_parts))
# At this point, the mental model should look like:
# [(x0, y0, ..), (x1, y1, ..), ..] in delayed form
# convert delayed objects into futures and make sure they are realized
fut_parts: List[distributed.Future] = client.compute(delayed_parts)
await distributed.wait(fut_parts) # async wait for parts to be computed
# maybe we can call dask.align_partitions here to ease the partition alignment?
for part in fut_parts:
# Each part is [x0, y0, w0, ...] in future form.
assert part.status == "finished", part.status
# Preserving the partition order for prediction.
self.partition_order = {}
for i, part in enumerate(fut_parts):
self.partition_order[part.key] = i
key_to_partition = {part.key: part for part in fut_parts}
who_has: Dict[str, Tuple[str, ...]] = await client.scheduler.who_has(
keys=[part.key for part in fut_parts]
)
worker_map: Dict[str, List[distributed.Future]] = defaultdict(list)
for key, workers in who_has.items():
worker_map[next(iter(workers))].append(key_to_partition[key])
self.worker_map = worker_map
if feature_weights is None:
self.feature_weights = None
else:
self.feature_weights = await client.compute(feature_weights).result()
return self
def _create_fn_args(self, worker_addr: str) -> Dict[str, Any]:
"""Create a dictionary of objects that can be pickled for function
arguments.
"""
return {
"feature_names": self.feature_names,
"feature_types": self.feature_types,
"feature_weights": self.feature_weights,
"missing": self.missing,
"enable_categorical": self.enable_categorical,
"parts": self.worker_map.get(worker_addr, None),
"is_quantile": self.is_quantile,
}
def num_col(self) -> int:
return self._n_cols
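# Hedged usage sketch (an assumption, not one of the documented examples in this module):
# constructing a DaskDMatrix from dask arrays, assuming a running distributed.Client. The
# helper name is made up for illustration.
def _example_build_dask_dmatrix(client: "distributed.Client") -> DaskDMatrix:
    X = da.random.random((1000, 10), chunks=(100, 10))  # 2-dimensional input is required
    y = da.random.random(1000, chunks=100)
    return DaskDMatrix(client, X, y)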
_MapRetT = TypeVar("_MapRetT")
async def map_worker_partitions(
client: Optional["distributed.Client"],
func: Callable[..., _MapRetT],
*refs: Any,
workers: List[str],
) -> List[_MapRetT]:
"""Map a function onto partitions of each worker."""
# Note for function purity:
# XGBoost is deterministic in most of the cases, which means train function is
# supposed to be idempotent. One known exception is gblinear with shotgun updater.
# We haven't been able to do a full verification so here we keep pure to be False.
client = _xgb_get_client(client)
futures = []
for addr in workers:
args = []
for ref in refs:
if isinstance(ref, DaskDMatrix):
# pylint: disable=protected-access
args.append(ref._create_fn_args(addr))
else:
args.append(ref)
fut = client.submit(
func, *args, pure=False, workers=[addr], allow_other_workers=False
)
futures.append(fut)
results = await client.gather(futures)
return results
_DataParts = List[Dict[str, Any]]
def _get_worker_parts(list_of_parts: _DataParts) -> Dict[str, List[Any]]:
assert isinstance(list_of_parts, list)
result: Dict[str, List[Any]] = {}
def append(i: int, name: str) -> None:
if name in list_of_parts[i]:
part = list_of_parts[i][name]
else:
part = None
if part is not None:
if name not in result:
result[name] = []
result[name].append(part)
for i, _ in enumerate(list_of_parts):
append(i, "data")
append(i, "label")
append(i, "weight")
append(i, "base_margin")
append(i, "qid")
append(i, "label_lower_bound")
append(i, "label_upper_bound")
return result
class DaskPartitionIter(DataIter): # pylint: disable=R0902
"""A data iterator for `DaskDeviceQuantileDMatrix`."""
def __init__(
self,
data: List[Any],
label: Optional[List[Any]] = None,
weight: Optional[List[Any]] = None,
base_margin: Optional[List[Any]] = None,
qid: Optional[List[Any]] = None,
label_lower_bound: Optional[List[Any]] = None,
label_upper_bound: Optional[List[Any]] = None,
feature_names: FeatureNames = None,
feature_types: Optional[Union[Any, List[Any]]] = None,
) -> None:
self._data = data
self._label = label
self._weight = weight
self._base_margin = base_margin
self._qid = qid
self._label_lower_bound = label_lower_bound
self._label_upper_bound = label_upper_bound
self._feature_names = feature_names
self._feature_types = feature_types
assert isinstance(self._data, collections.abc.Sequence)
types = (collections.abc.Sequence, type(None))
assert isinstance(self._label, types)
assert isinstance(self._weight, types)
assert isinstance(self._base_margin, types)
assert isinstance(self._label_lower_bound, types)
assert isinstance(self._label_upper_bound, types)
self._iter = 0 # set iterator to 0
super().__init__()
def _get(self, attr: str) -> Optional[Any]:
if getattr(self, attr) is not None:
return getattr(self, attr)[self._iter]
return None
def data(self) -> Any:
"""Utility function for obtaining current batch of data."""
return self._data[self._iter]
def reset(self) -> None:
"""Reset the iterator"""
self._iter = 0
def next(self, input_data: Callable) -> int:
"""Yield next batch of data"""
if self._iter == len(self._data):
# Return 0 when there's no more batch.
return 0
feature_names: FeatureNames = None
if self._feature_names:
feature_names = self._feature_names
else:
if hasattr(self.data(), "columns"):
feature_names = self.data().columns.format()
else:
feature_names = None
input_data(
data=self.data(),
label=self._get("_label"),
weight=self._get("_weight"),
group=None,
qid=self._get("_qid"),
base_margin=self._get("_base_margin"),
label_lower_bound=self._get("_label_lower_bound"),
label_upper_bound=self._get("_label_upper_bound"),
feature_names=feature_names,
feature_types=self._feature_types,
)
self._iter += 1
return 1
class DaskDeviceQuantileDMatrix(DaskDMatrix):
"""Specialized data type for `gpu_hist` tree method. This class is used to reduce the
memory usage by eliminating data copies. Internally the all partitions/chunks of data
are merged by weighted GK sketching. So the number of partitions from dask may affect
training accuracy as GK generates bounded error for each merge. See doc string for
:py:obj:`xgboost.DeviceQuantileDMatrix` and :py:obj:`xgboost.DMatrix` for other
parameters.
.. versionadded:: 1.2.0
Parameters
----------
max_bin : Number of bins for histogram construction.
"""
@_deprecate_positional_args
def __init__(
self,
client: "distributed.Client",
data: _DaskCollection,
label: Optional[_DaskCollection] = None,
*,
weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
missing: float = None,
silent: bool = False, # disable=unused-argument
feature_names: FeatureNames = None,
feature_types: Optional[Union[Any, List[Any]]] = None,
max_bin: int = 256,
group: Optional[_DaskCollection] = None,
qid: Optional[_DaskCollection] = None,
label_lower_bound: Optional[_DaskCollection] = None,
label_upper_bound: Optional[_DaskCollection] = None,
feature_weights: Optional[_DaskCollection] = None,
enable_categorical: bool = False,
) -> None:
super().__init__(
client=client,
data=data,
label=label,
weight=weight,
base_margin=base_margin,
group=group,
qid=qid,
label_lower_bound=label_lower_bound,
label_upper_bound=label_upper_bound,
missing=missing,
silent=silent,
feature_weights=feature_weights,
feature_names=feature_names,
feature_types=feature_types,
enable_categorical=enable_categorical,
)
self.max_bin = max_bin
self.is_quantile = True
def _create_fn_args(self, worker_addr: str) -> Dict[str, Any]:
args = super()._create_fn_args(worker_addr)
args["max_bin"] = self.max_bin
return args
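# A hedged construction sketch for the quantile DMatrix defined above. It assumes a GPU
# cluster (e.g. dask_cuda.LocalCUDACluster) and GPU-backed dask collections; the names
# `client`, `X_gpu` and `y_gpu` are placeholders, not part of this module.
def _example_device_quantile_dmatrix(client: "distributed.Client", X_gpu: Any, y_gpu: Any) -> None:
    # Illustrative only; never called from this module.
    import xgboost as xgb
    dtrain = xgb.dask.DaskDeviceQuantileDMatrix(client, X_gpu, y_gpu, max_bin=256)
    xgb.dask.train(client, {"tree_method": "gpu_hist"}, dtrain, num_boost_round=10)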
def _create_device_quantile_dmatrix(
feature_names: FeatureNames,
feature_types: Optional[Union[Any, List[Any]]],
feature_weights: Optional[Any],
missing: float,
nthread: int,
parts: Optional[_DataParts],
max_bin: int,
enable_categorical: bool,
) -> DeviceQuantileDMatrix:
worker = distributed.get_worker()
if parts is None:
msg = f"worker {worker.address} has an empty DMatrix."
LOGGER.warning(msg)
import cupy
d = DeviceQuantileDMatrix(
cupy.zeros((0, 0)),
feature_names=feature_names,
feature_types=feature_types,
max_bin=max_bin,
enable_categorical=enable_categorical,
)
return d
unzipped_dict = _get_worker_parts(parts)
it = DaskPartitionIter(**unzipped_dict)
dmatrix = DeviceQuantileDMatrix(
it,
missing=missing,
feature_names=feature_names,
feature_types=feature_types,
nthread=nthread,
max_bin=max_bin,
enable_categorical=enable_categorical,
)
dmatrix.set_info(feature_weights=feature_weights)
return dmatrix
def _create_dmatrix(
feature_names: FeatureNames,
feature_types: Optional[Union[Any, List[Any]]],
feature_weights: Optional[Any],
missing: float,
nthread: int,
enable_categorical: bool,
parts: Optional[_DataParts],
) -> DMatrix:
"""Get data that local to worker from DaskDMatrix.
Returns
-------
A DMatrix object.
"""
worker = distributed.get_worker()
list_of_parts = parts
if list_of_parts is None:
msg = f"worker {worker.address} has an empty DMatrix."
LOGGER.warning(msg)
d = DMatrix(
numpy.empty((0, 0)),
feature_names=feature_names,
feature_types=feature_types,
enable_categorical=enable_categorical,
)
return d
T = TypeVar("T")
def concat_or_none(data: Sequence[Optional[T]]) -> Optional[T]:
if any(part is None for part in data):
return None
return concat(data)
unzipped_dict = _get_worker_parts(list_of_parts)
concated_dict: Dict[str, Any] = {}
for key, value in unzipped_dict.items():
v = concat_or_none(value)
concated_dict[key] = v
dmatrix = DMatrix(
**concated_dict,
missing=missing,
feature_names=feature_names,
feature_types=feature_types,
nthread=nthread,
enable_categorical=enable_categorical,
feature_weights=feature_weights,
)
return dmatrix
def _dmatrix_from_list_of_parts(
is_quantile: bool, **kwargs: Any
) -> Union[DMatrix, DeviceQuantileDMatrix]:
if is_quantile:
return _create_device_quantile_dmatrix(**kwargs)
return _create_dmatrix(**kwargs)
async def _get_rabit_args(
n_workers: int, dconfig: Optional[Dict[str, Any]], client: "distributed.Client"
) -> List[bytes]:
"""Get rabit context arguments from data distribution in DaskDMatrix."""
# There are 3 possible different addresses:
# 1. Provided by user via dask.config
# 2. Guessed by xgboost `get_host_ip` function
# 3. From dask scheduler
# We try 1 and 3 if 1 is available, otherwise 2 and 3.
valid_config = ["scheduler_address"]
# See if user config is available
host_ip: Optional[str] = None
port: int = 0
if dconfig is not None:
for k in dconfig:
if k not in valid_config:
raise ValueError(f"Unknown configuration: {k}")
host_ip = dconfig.get("scheduler_address", None)
try:
host_ip, port = distributed.comm.get_address_host_port(host_ip)
except ValueError:
pass
if host_ip is not None:
user_addr = (host_ip, port)
else:
user_addr = None
# Try address from dask scheduler, this might not work, see
# https://github.com/dask/dask-xgboost/pull/40
try:
sched_addr = distributed.comm.get_address_host(client.scheduler.address)
sched_addr = sched_addr.strip("/:")
except Exception: # pylint: disable=broad-except
sched_addr = None
env = await client.run_on_scheduler(
_start_tracker, n_workers, sched_addr, user_addr
)
rabit_args = [f"{k}={v}".encode() for k, v in env.items()]
return rabit_args
def _get_dask_config() -> Optional[Dict[str, Any]]:
return dask.config.get("xgboost", default=None)
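# A hedged sketch of how a user supplies the tracker host consumed by `_get_rabit_args`
# through the dask config read in `_get_dask_config` above; the address is a placeholder.
def _example_set_tracker_host() -> None:
    # Illustrative only; never called from this module.
    import dask
    # After this, `_get_dask_config()` returns {"scheduler_address": "10.0.0.1"} and
    # `_get_rabit_args` prefers that host over the guessed/scheduler addresses.
    dask.config.set({"xgboost.scheduler_address": "10.0.0.1"})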
# train and predict methods are supposed to be "functional", which meets the
# dask paradigm. But as a side effect, the `evals_result` in single-node API
# is no longer supported since it mutates the input parameter, and it's not
# intuitive to sync the mutation result. Therefore, a dictionary containing
# evaluation history is instead returned.
def _get_workers_from_data(
dtrain: DaskDMatrix, evals: Optional[Sequence[Tuple[DaskDMatrix, str]]]
) -> List[str]:
X_worker_map: Set[str] = set(dtrain.worker_map.keys())
if evals:
for e in evals:
assert len(e) == 2
assert isinstance(e[0], DaskDMatrix) and isinstance(e[1], str)
if e[0] is dtrain:
continue
worker_map = set(e[0].worker_map.keys())
X_worker_map = X_worker_map.union(worker_map)
return list(X_worker_map)
async def _train_async(
client: "distributed.Client",
global_config: Dict[str, Any],
dconfig: Optional[Dict[str, Any]],
params: Dict[str, Any],
dtrain: DaskDMatrix,
num_boost_round: int,
evals: Optional[Sequence[Tuple[DaskDMatrix, str]]],
obj: Optional[Objective],
feval: Optional[Metric],
early_stopping_rounds: Optional[int],
verbose_eval: Union[int, bool],
xgb_model: Optional[Booster],
callbacks: Optional[Sequence[TrainingCallback]],
custom_metric: Optional[Metric],
) -> Optional[TrainReturnT]:
workers = _get_workers_from_data(dtrain, evals)
_rabit_args = await _get_rabit_args(len(workers), dconfig, client)
if params.get("booster", None) == "gblinear":
raise NotImplementedError(
f"booster `{params['booster']}` is not yet supported for dask."
)
def dispatched_train(
parameters: Dict,
rabit_args: List[bytes],
train_id: int,
evals_name: List[str],
evals_id: List[int],
train_ref: dict,
*refs: dict,
) -> Optional[TrainReturnT]:
worker = distributed.get_worker()
local_param = parameters.copy()
n_threads = 0
for p in ["nthread", "n_jobs"]:
if (
local_param.get(p, None) is not None
and local_param.get(p, worker.nthreads) != worker.nthreads
):
LOGGER.info("Overriding `nthreads` defined in dask worker.")
n_threads = local_param[p]
break
if n_threads == 0 or n_threads is None:
n_threads = worker.nthreads
local_param.update({"nthread": n_threads, "n_jobs": n_threads})
local_history: TrainingCallback.EvalsLog = {}
with RabitContext(rabit_args), config.config_context(**global_config):
Xy = _dmatrix_from_list_of_parts(**train_ref, nthread=n_threads)
evals: List[Tuple[DMatrix, str]] = []
for i, ref in enumerate(refs):
if evals_id[i] == train_id:
evals.append((Xy, evals_name[i]))
continue
eval_Xy = _dmatrix_from_list_of_parts(**ref, nthread=n_threads)
evals.append((eval_Xy, evals_name[i]))
booster = worker_train(
params=local_param,
dtrain=Xy,
num_boost_round=num_boost_round,
evals_result=local_history,
evals=evals if len(evals) != 0 else None,
obj=obj,
feval=feval,
custom_metric=custom_metric,
early_stopping_rounds=early_stopping_rounds,
verbose_eval=verbose_eval,
xgb_model=xgb_model,
callbacks=callbacks,
)
if Xy.num_row() != 0:
ret: Optional[TrainReturnT] = {
"booster": booster,
"history": local_history,
}
else:
ret = None
return ret
async with _multi_lock()(workers, client):
if evals is not None:
evals_data = [d for d, n in evals]
evals_name = [n for d, n in evals]
evals_id = [id(d) for d in evals_data]
else:
evals_data = []
evals_name = []
evals_id = []
results = await map_worker_partitions(
client,
dispatched_train,
params,
_rabit_args,
id(dtrain),
evals_name,
evals_id,
*([dtrain] + evals_data),
workers=workers,
)
return list(filter(lambda ret: ret is not None, results))[0]
@_deprecate_positional_args
def train( # pylint: disable=unused-argument
client: "distributed.Client",
params: Dict[str, Any],
dtrain: DaskDMatrix,
num_boost_round: int = 10,
*,
evals: Optional[Sequence[Tuple[DaskDMatrix, str]]] = None,
obj: Optional[Objective] = None,
feval: Optional[Metric] = None,
early_stopping_rounds: Optional[int] = None,
xgb_model: Optional[Booster] = None,
verbose_eval: Union[int, bool] = True,
callbacks: Optional[Sequence[TrainingCallback]] = None,
custom_metric: Optional[Metric] = None,
) -> Any:
"""Train XGBoost model.
.. versionadded:: 1.0.0
.. note::
Other parameters are the same as :py:func:`xgboost.train` except for
`evals_result`, which is returned as part of function return value instead of
argument.
Parameters
----------
client :
Specify the dask client used for training. Use default client returned from dask
if it's set to None.
Returns
-------
results: dict
        A dictionary containing the trained booster and evaluation history. The `history`
        field is the same as `evals_result` from `xgboost.train`.
.. code-block:: python
{'booster': xgboost.Booster,
'history': {'train': {'logloss': ['0.48253', '0.35953']},
'eval': {'logloss': ['0.480385', '0.357756']}}}
"""
_assert_dask_support()
client = _xgb_get_client(client)
args = locals()
return client.sync(
_train_async,
global_config=config.get_config(),
dconfig=_get_dask_config(),
**args,
)
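# A minimal usage sketch for the functional `train` API documented above. The LocalCluster
# size, the synthetic arrays and the metric name are illustrative assumptions, not part of
# this module.
def _example_dask_train() -> None:
    # Illustrative only; never called from this module.
    import dask.array as da
    from distributed import Client, LocalCluster
    import xgboost as xgb
    with LocalCluster(n_workers=2, threads_per_worker=1) as cluster, Client(cluster) as client:
        X = da.random.random((1000, 10), chunks=(100, 10))
        y = da.random.random(1000, chunks=100)
        dtrain = xgb.dask.DaskDMatrix(client, X, y)
        output = xgb.dask.train(
            client,
            {"objective": "reg:squarederror", "tree_method": "hist"},
            dtrain,
            num_boost_round=10,
            evals=[(dtrain, "train")],
        )
        booster = output["booster"]   # trained Booster
        history = output["history"]   # evaluation history, keyed by eval name then metric
        print(booster.num_boosted_rounds(), history["train"]["rmse"][-1])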
def _can_output_df(is_df: bool, output_shape: Tuple) -> bool:
return is_df and len(output_shape) <= 2
def _maybe_dataframe(
data: Any, prediction: Any, columns: List[int], is_df: bool
) -> Any:
"""Return dataframe for prediction when applicable."""
if _can_output_df(is_df, prediction.shape):
# Need to preserve the index for dataframe.
# See issue: https://github.com/dmlc/xgboost/issues/6939
# In older versions of dask, the partition is actually a numpy array when input is
# dataframe.
index = getattr(data, "index", None)
if lazy_isinstance(data, "cudf.core.dataframe", "DataFrame"):
import cudf
if prediction.size == 0:
return cudf.DataFrame({}, columns=columns, dtype=numpy.float32)
prediction = cudf.DataFrame(
prediction, columns=columns, dtype=numpy.float32, index=index
)
else:
if prediction.size == 0:
return DataFrame({}, columns=columns, dtype=numpy.float32, index=index)
prediction = DataFrame(
prediction, columns=columns, dtype=numpy.float32, index=index
)
return prediction
async def _direct_predict_impl( # pylint: disable=too-many-branches
mapped_predict: Callable,
booster: "distributed.Future",
data: _DaskCollection,
base_margin: Optional[_DaskCollection],
output_shape: Tuple[int, ...],
meta: Dict[int, str],
) -> _DaskCollection:
columns = tuple(meta.keys())
if len(output_shape) >= 3 and isinstance(data, dd.DataFrame):
# Without this check, dask will finish the prediction silently even if output
# dimension is greater than 3. But during map_partitions, dask passes a
# `dd.DataFrame` as local input to xgboost, which is converted to csr_matrix by
# `_convert_unknown_data` since dd.DataFrame is not known to xgboost native
# binding.
raise ValueError(
"Use `da.Array` or `DaskDMatrix` when output has more than 2 dimensions."
)
if _can_output_df(isinstance(data, dd.DataFrame), output_shape):
if base_margin is not None and isinstance(base_margin, da.Array):
# Easier for map_partitions
base_margin_df: Optional[dd.DataFrame] = base_margin.to_dask_dataframe()
else:
base_margin_df = base_margin
predictions = dd.map_partitions(
mapped_predict,
booster,
data,
True,
columns,
base_margin_df,
meta=dd.utils.make_meta(meta),
)
# classification can return a dataframe, drop 1 dim when it's reg/binary
if len(output_shape) == 1:
predictions = predictions.iloc[:, 0]
else:
if base_margin is not None and isinstance(
base_margin, (dd.Series, dd.DataFrame)
):
# Easier for map_blocks
base_margin_array: Optional[da.Array] = base_margin.to_dask_array()
else:
base_margin_array = base_margin
# Input data is 2-dim array, output can be 1(reg, binary)/2(multi-class,
# contrib)/3(contrib, interaction)/4(interaction) dims.
if len(output_shape) == 1:
drop_axis: Union[int, List[int]] = [1] # drop from 2 to 1 dim.
new_axis: Union[int, List[int]] = []
else:
drop_axis = []
if isinstance(data, dd.DataFrame):
new_axis = list(range(len(output_shape) - 2))
else:
new_axis = [i + 2 for i in range(len(output_shape) - 2)]
if len(output_shape) == 2:
                # Somehow dask fails to infer the output shape change for 2-dim prediction, and
                # `chunks = (None, output_shape[1])` doesn't work because None is not
                # supported in map_blocks.
chunks: Optional[List[Tuple]] = list(data.chunks)
assert isinstance(chunks, list)
chunks[1] = (output_shape[1],)
else:
chunks = None
predictions = da.map_blocks(
mapped_predict,
booster,
data,
False,
columns,
base_margin_array,
chunks=chunks,
drop_axis=drop_axis,
new_axis=new_axis,
dtype=numpy.float32,
)
return predictions
def _infer_predict_output(
booster: Booster, features: int, is_df: bool, inplace: bool, **kwargs: Any
) -> Tuple[Tuple[int, ...], Dict[int, str]]:
"""Create a dummy test sample to infer output shape for prediction."""
assert isinstance(features, int)
rng = numpy.random.RandomState(1994)
test_sample = rng.randn(1, features)
if inplace:
kwargs = kwargs.copy()
if kwargs.pop("predict_type") == "margin":
kwargs["output_margin"] = True
m = DMatrix(test_sample)
    # The generated DMatrix doesn't have feature names, so no validation.
test_predt = booster.predict(m, validate_features=False, **kwargs)
n_columns = test_predt.shape[1] if len(test_predt.shape) > 1 else 1
meta: Dict[int, str] = {}
if _can_output_df(is_df, test_predt.shape):
for i in range(n_columns):
meta[i] = "f4"
return test_predt.shape, meta
async def _get_model_future(
client: "distributed.Client", model: Union[Booster, Dict, "distributed.Future"]
) -> "distributed.Future":
if isinstance(model, Booster):
booster = await client.scatter(model, broadcast=True)
elif isinstance(model, dict):
booster = await client.scatter(model["booster"], broadcast=True)
elif isinstance(model, distributed.Future):
booster = model
if booster.type is not Booster:
raise TypeError(
f"Underlying type of model future should be `Booster`, got {booster.type}"
)
else:
raise TypeError(_expect([Booster, dict, distributed.Future], type(model)))
return booster
# pylint: disable=too-many-statements
async def _predict_async(
client: "distributed.Client",
global_config: Dict[str, Any],
model: Union[Booster, Dict, "distributed.Future"],
data: _DaskCollection,
output_margin: bool,
missing: float,
pred_leaf: bool,
pred_contribs: bool,
approx_contribs: bool,
pred_interactions: bool,
validate_features: bool,
iteration_range: Tuple[int, int],
strict_shape: bool,
) -> _DaskCollection:
_booster = await _get_model_future(client, model)
if not isinstance(data, (DaskDMatrix, da.Array, dd.DataFrame)):
raise TypeError(_expect([DaskDMatrix, da.Array, dd.DataFrame], type(data)))
def mapped_predict(
booster: Booster, partition: Any, is_df: bool, columns: List[int], _: Any
) -> Any:
with config.config_context(**global_config):
m = DMatrix(
data=partition,
missing=missing,
enable_categorical=_has_categorical(booster, partition)
)
predt = booster.predict(
data=m,
output_margin=output_margin,
pred_leaf=pred_leaf,
pred_contribs=pred_contribs,
approx_contribs=approx_contribs,
pred_interactions=pred_interactions,
validate_features=validate_features,
iteration_range=iteration_range,
strict_shape=strict_shape,
)
predt = _maybe_dataframe(partition, predt, columns, is_df)
return predt
# Predict on dask collection directly.
if isinstance(data, (da.Array, dd.DataFrame)):
_output_shape, meta = await client.compute(
client.submit(
_infer_predict_output,
_booster,
features=data.shape[1],
is_df=isinstance(data, dd.DataFrame),
inplace=False,
output_margin=output_margin,
pred_leaf=pred_leaf,
pred_contribs=pred_contribs,
approx_contribs=approx_contribs,
pred_interactions=pred_interactions,
strict_shape=strict_shape,
)
)
return await _direct_predict_impl(
mapped_predict, _booster, data, None, _output_shape, meta
)
output_shape, _ = await client.compute(
client.submit(
_infer_predict_output,
booster=_booster,
features=data.num_col(),
is_df=False,
inplace=False,
output_margin=output_margin,
pred_leaf=pred_leaf,
pred_contribs=pred_contribs,
approx_contribs=approx_contribs,
pred_interactions=pred_interactions,
strict_shape=strict_shape,
)
)
# Prediction on dask DMatrix.
partition_order = data.partition_order
feature_names = data.feature_names
feature_types = data.feature_types
missing = data.missing
def dispatched_predict(booster: Booster, part: Dict[str, Any]) -> numpy.ndarray:
data = part["data"]
base_margin = part.get("base_margin", None)
with config.config_context(**global_config):
m = DMatrix(
data,
missing=missing,
base_margin=base_margin,
feature_names=feature_names,
feature_types=feature_types,
)
predt = booster.predict(
m,
output_margin=output_margin,
pred_leaf=pred_leaf,
pred_contribs=pred_contribs,
approx_contribs=approx_contribs,
pred_interactions=pred_interactions,
validate_features=validate_features,
iteration_range=iteration_range,
strict_shape=strict_shape,
)
return predt
all_parts = []
all_orders = []
all_shapes = []
all_workers: List[str] = []
workers_address = list(data.worker_map.keys())
for worker_addr in workers_address:
list_of_parts = data.worker_map[worker_addr]
all_parts.extend(list_of_parts)
all_workers.extend(len(list_of_parts) * [worker_addr])
all_orders.extend([partition_order[part.key] for part in list_of_parts])
for w, part in zip(all_workers, all_parts):
s = client.submit(lambda part: part["data"].shape[0], part, workers=[w])
all_shapes.append(s)
parts_with_order = list(zip(all_parts, all_shapes, all_orders, all_workers))
parts_with_order = sorted(parts_with_order, key=lambda p: p[2])
all_parts = [part for part, shape, order, w in parts_with_order]
all_shapes = [shape for part, shape, order, w in parts_with_order]
all_workers = [w for part, shape, order, w in parts_with_order]
futures = []
for w, part in zip(all_workers, all_parts):
f = client.submit(dispatched_predict, _booster, part, workers=[w])
futures.append(f)
# Constructing a dask array from list of numpy arrays
# See https://docs.dask.org/en/latest/array-creation.html
arrays = []
all_shapes = await client.gather(all_shapes)
for i, rows in enumerate(all_shapes):
arrays.append(
da.from_delayed(
futures[i], shape=(rows,) + output_shape[1:], dtype=numpy.float32
)
)
predictions = da.concatenate(arrays, axis=0)
return predictions
def predict( # pylint: disable=unused-argument
client: "distributed.Client",
model: Union[TrainReturnT, Booster, "distributed.Future"],
data: Union[DaskDMatrix, _DaskCollection],
output_margin: bool = False,
missing: float = numpy.nan,
pred_leaf: bool = False,
pred_contribs: bool = False,
approx_contribs: bool = False,
pred_interactions: bool = False,
validate_features: bool = True,
iteration_range: Tuple[int, int] = (0, 0),
strict_shape: bool = False,
) -> Any:
"""Run prediction with a trained booster.
.. note::
Using ``inplace_predict`` might be faster when some features are not needed. See
:py:meth:`xgboost.Booster.predict` for details on various parameters. When output
has more than 2 dimensions (shap value, leaf with strict_shape), input should be
``da.Array`` or ``DaskDMatrix``.
.. versionadded:: 1.0.0
Parameters
----------
client:
Specify the dask client used for training. Use default client
returned from dask if it's set to None.
model:
The trained model. It can be a distributed.Future so user can
pre-scatter it onto all workers.
data:
Input data used for prediction. When input is a dataframe object,
prediction output is a series.
missing:
Used when input data is not DaskDMatrix. Specify the value
considered as missing.
Returns
-------
prediction: dask.array.Array/dask.dataframe.Series
When input data is ``dask.array.Array`` or ``DaskDMatrix``, the return value is an
array, when input data is ``dask.dataframe.DataFrame``, return value can be
``dask.dataframe.Series``, ``dask.dataframe.DataFrame``, depending on the output
shape.
"""
_assert_dask_support()
client = _xgb_get_client(client)
return client.sync(_predict_async, global_config=config.get_config(), **locals())
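# A hedged sketch of the two prediction paths handled above: a dask collection predicted
# directly, and a DaskDMatrix whose partitions stay on the workers. `client`, `output`, `X`
# and `dtrain` are placeholder names carried over from the training sketch after `train`.
def _example_dask_predict(client: "distributed.Client", output: Dict[str, Any], X: Any, dtrain: "DaskDMatrix") -> None:
    # Illustrative only; never called from this module.
    import xgboost as xgb
    pred_from_array = xgb.dask.predict(client, output["booster"], X)   # dask collection input
    pred_from_dmatrix = xgb.dask.predict(client, output, dtrain)       # DaskDMatrix input
    print(pred_from_array.compute()[:5], pred_from_dmatrix.compute()[:5])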
async def _inplace_predict_async( # pylint: disable=too-many-branches
client: "distributed.Client",
global_config: Dict[str, Any],
model: Union[Booster, Dict, "distributed.Future"],
data: _DaskCollection,
iteration_range: Tuple[int, int],
predict_type: str,
missing: float,
validate_features: bool,
base_margin: Optional[_DaskCollection],
strict_shape: bool,
) -> _DaskCollection:
client = _xgb_get_client(client)
booster = await _get_model_future(client, model)
if not isinstance(data, (da.Array, dd.DataFrame)):
raise TypeError(_expect([da.Array, dd.DataFrame], type(data)))
if base_margin is not None and not isinstance(
data, (da.Array, dd.DataFrame, dd.Series)
):
raise TypeError(_expect([da.Array, dd.DataFrame, dd.Series], type(base_margin)))
def mapped_predict(
booster: Booster,
partition: Any,
is_df: bool,
columns: List[int],
base_margin: Any,
) -> Any:
with config.config_context(**global_config):
prediction = booster.inplace_predict(
partition,
iteration_range=iteration_range,
predict_type=predict_type,
missing=missing,
base_margin=base_margin,
validate_features=validate_features,
strict_shape=strict_shape,
)
prediction = _maybe_dataframe(partition, prediction, columns, is_df)
return prediction
# await turns future into value.
shape, meta = await client.compute(
client.submit(
_infer_predict_output,
booster,
features=data.shape[1],
is_df=isinstance(data, dd.DataFrame),
inplace=True,
predict_type=predict_type,
iteration_range=iteration_range,
strict_shape=strict_shape,
)
)
return await _direct_predict_impl(
mapped_predict, booster, data, base_margin, shape, meta
)
def inplace_predict( # pylint: disable=unused-argument
client: "distributed.Client",
model: Union[TrainReturnT, Booster, "distributed.Future"],
data: _DaskCollection,
iteration_range: Tuple[int, int] = (0, 0),
predict_type: str = "value",
missing: float = numpy.nan,
validate_features: bool = True,
base_margin: Optional[_DaskCollection] = None,
strict_shape: bool = False,
) -> Any:
"""Inplace prediction. See doc in :py:meth:`xgboost.Booster.inplace_predict` for details.
.. versionadded:: 1.1.0
Parameters
----------
client:
Specify the dask client used for training. Use default client
returned from dask if it's set to None.
model:
See :py:func:`xgboost.dask.predict` for details.
data :
dask collection.
iteration_range:
See :py:meth:`xgboost.Booster.predict` for details.
predict_type:
See :py:meth:`xgboost.Booster.inplace_predict` for details.
missing:
Value in the input data which needs to be present as a missing
value. If None, defaults to np.nan.
base_margin:
See :py:obj:`xgboost.DMatrix` for details.
.. versionadded:: 1.4.0
strict_shape:
See :py:meth:`xgboost.Booster.predict` for details.
.. versionadded:: 1.4.0
Returns
-------
prediction :
When input data is ``dask.array.Array``, the return value is an array, when input
data is ``dask.dataframe.DataFrame``, return value can be
``dask.dataframe.Series``, ``dask.dataframe.DataFrame``, depending on the output
shape.
"""
_assert_dask_support()
client = _xgb_get_client(client)
# When used in asynchronous environment, the `client` object should have
# `asynchronous` attribute as True. When invoked by the skl interface, it's
# responsible for setting up the client.
return client.sync(
_inplace_predict_async, global_config=config.get_config(), **locals()
)
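# A short hedged companion to the sketch after `predict`: `inplace_predict` skips DMatrix
# construction and runs directly on the dask partitions. `client`, `output` and `X` are the
# same placeholder names as above.
def _example_dask_inplace_predict(client: "distributed.Client", output: Dict[str, Any], X: Any) -> None:
    # Illustrative only; never called from this module.
    import xgboost as xgb
    predt = xgb.dask.inplace_predict(client, output["booster"], X)
    print(predt.compute()[:5])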
async def _async_wrap_evaluation_matrices(
client: "distributed.Client", **kwargs: Any
) -> Tuple[DaskDMatrix, Optional[List[Tuple[DaskDMatrix, str]]]]:
"""A switch function for async environment."""
def _inner(**kwargs: Any) -> DaskDMatrix:
m = DaskDMatrix(client=client, **kwargs)
return m
train_dmatrix, evals = _wrap_evaluation_matrices(create_dmatrix=_inner, **kwargs)
train_dmatrix = await train_dmatrix
if evals is None:
return train_dmatrix, evals
awaited = []
for e in evals:
if e[0] is train_dmatrix: # already awaited
awaited.append(e)
continue
awaited.append((await e[0], e[1]))
return train_dmatrix, awaited
@contextmanager
def _set_worker_client(
model: "DaskScikitLearnBase", client: "distributed.Client"
) -> Generator:
"""Temporarily set the client for sklearn model."""
try:
model.client = client
yield model
finally:
model.client = None
class DaskScikitLearnBase(XGBModel):
"""Base class for implementing scikit-learn interface with Dask"""
_client = None
async def _predict_async(
self,
data: _DaskCollection,
output_margin: bool,
validate_features: bool,
base_margin: Optional[_DaskCollection],
iteration_range: Optional[Tuple[int, int]],
) -> Any:
iteration_range = self._get_iteration_range(iteration_range)
if self._can_use_inplace_predict():
predts = await inplace_predict(
client=self.client,
model=self.get_booster(),
data=data,
iteration_range=iteration_range,
predict_type="margin" if output_margin else "value",
missing=self.missing,
base_margin=base_margin,
validate_features=validate_features,
)
if isinstance(predts, dd.DataFrame):
predts = predts.to_dask_array()
else:
test_dmatrix = await DaskDMatrix(
self.client,
data=data,
base_margin=base_margin,
missing=self.missing,
feature_types=self.feature_types
)
predts = await predict(
self.client,
model=self.get_booster(),
data=test_dmatrix,
output_margin=output_margin,
validate_features=validate_features,
iteration_range=iteration_range,
)
return predts
def predict(
self,
X: _DaskCollection,
output_margin: bool = False,
ntree_limit: Optional[int] = None,
validate_features: bool = True,
base_margin: Optional[_DaskCollection] = None,
iteration_range: Optional[Tuple[int, int]] = None,
) -> Any:
_assert_dask_support()
msg = "`ntree_limit` is not supported on dask, use `iteration_range` instead."
assert ntree_limit is None, msg
return self.client.sync(
self._predict_async,
X,
output_margin=output_margin,
validate_features=validate_features,
base_margin=base_margin,
iteration_range=iteration_range,
)
async def _apply_async(
self,
X: _DaskCollection,
iteration_range: Optional[Tuple[int, int]] = None,
) -> Any:
iteration_range = self._get_iteration_range(iteration_range)
test_dmatrix = await DaskDMatrix(
self.client, data=X, missing=self.missing, feature_types=self.feature_types,
)
predts = await predict(
self.client,
model=self.get_booster(),
data=test_dmatrix,
pred_leaf=True,
iteration_range=iteration_range,
)
return predts
def apply(
self,
X: _DaskCollection,
ntree_limit: Optional[int] = None,
iteration_range: Optional[Tuple[int, int]] = None,
) -> Any:
_assert_dask_support()
msg = "`ntree_limit` is not supported on dask, use `iteration_range` instead."
assert ntree_limit is None, msg
return self.client.sync(self._apply_async, X, iteration_range=iteration_range)
def __await__(self) -> Awaitable[Any]:
# Generate a coroutine wrapper to make this class awaitable.
async def _() -> Awaitable[Any]:
return self
return self._client_sync(_).__await__()
def __getstate__(self) -> Dict:
this = self.__dict__.copy()
if "_client" in this.keys():
del this["_client"]
return this
@property
def client(self) -> "distributed.Client":
"""The dask client used in this model. The `Client` object can not be serialized for
transmission, so if task is launched from a worker instead of directly from the
client process, this attribute needs to be set at that worker.
"""
client = _xgb_get_client(self._client)
return client
@client.setter
def client(self, clt: "distributed.Client") -> None:
# calling `worker_client' doesn't return the correct `asynchronous` attribute, so
# we have to pass it ourselves.
self._asynchronous = clt.asynchronous if clt is not None else False
self._client = clt
def _client_sync(self, func: Callable, **kwargs: Any) -> Any:
"""Get the correct client, when method is invoked inside a worker we
should use `worker_client' instead of default client.
"""
if self._client is None:
asynchronous = getattr(self, "_asynchronous", False)
try:
distributed.get_worker()
in_worker = True
except ValueError:
in_worker = False
if in_worker:
with distributed.worker_client() as client:
with _set_worker_client(self, client) as this:
ret = this.client.sync(
func, **kwargs, asynchronous=asynchronous
)
return ret
return ret
return self.client.sync(func, **kwargs, asynchronous=self.client.asynchronous)
@xgboost_model_doc(
"""Implementation of the Scikit-Learn API for XGBoost.""", ["estimators", "model"]
)
class DaskXGBRegressor(DaskScikitLearnBase, XGBRegressorBase):
# pylint: disable=missing-class-docstring
async def _fit_async(
self,
X: _DaskCollection,
y: _DaskCollection,
sample_weight: Optional[_DaskCollection],
base_margin: Optional[_DaskCollection],
eval_set: Optional[Sequence[Tuple[_DaskCollection, _DaskCollection]]],
eval_metric: Optional[Union[str, Sequence[str], Metric]],
sample_weight_eval_set: Optional[Sequence[_DaskCollection]],
base_margin_eval_set: Optional[Sequence[_DaskCollection]],
early_stopping_rounds: Optional[int],
verbose: bool,
xgb_model: Optional[Union[Booster, XGBModel]],
feature_weights: Optional[_DaskCollection],
callbacks: Optional[Sequence[TrainingCallback]],
) -> _DaskCollection:
params = self.get_xgb_params()
dtrain, evals = await _async_wrap_evaluation_matrices(
client=self.client,
X=X,
y=y,
group=None,
qid=None,
sample_weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
eval_set=eval_set,
sample_weight_eval_set=sample_weight_eval_set,
base_margin_eval_set=base_margin_eval_set,
eval_group=None,
eval_qid=None,
missing=self.missing,
enable_categorical=self.enable_categorical,
feature_types=self.feature_types,
)
if callable(self.objective):
obj: Optional[Callable] = _objective_decorator(self.objective)
else:
obj = None
model, metric, params, early_stopping_rounds, callbacks = self._configure_fit(
xgb_model, eval_metric, params, early_stopping_rounds, callbacks
)
results = await self.client.sync(
_train_async,
asynchronous=True,
client=self.client,
global_config=config.get_config(),
dconfig=_get_dask_config(),
params=params,
dtrain=dtrain,
num_boost_round=self.get_num_boosting_rounds(),
evals=evals,
obj=obj,
feval=None,
custom_metric=metric,
verbose_eval=verbose,
early_stopping_rounds=early_stopping_rounds,
callbacks=callbacks,
xgb_model=model,
)
self._Booster = results["booster"]
self._set_evaluation_result(results["history"])
return self
# pylint: disable=missing-docstring, disable=unused-argument
@_deprecate_positional_args
def fit(
self,
X: _DaskCollection,
y: _DaskCollection,
*,
sample_weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
eval_set: Optional[Sequence[Tuple[_DaskCollection, _DaskCollection]]] = None,
eval_metric: Optional[Union[str, Sequence[str], Callable]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: bool = True,
xgb_model: Optional[Union[Booster, XGBModel]] = None,
sample_weight_eval_set: Optional[Sequence[_DaskCollection]] = None,
base_margin_eval_set: Optional[Sequence[_DaskCollection]] = None,
feature_weights: Optional[_DaskCollection] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None,
) -> "DaskXGBRegressor":
_assert_dask_support()
args = {k: v for k, v in locals().items() if k not in ("self", "__class__")}
return self._client_sync(self._fit_async, **args)
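# A hedged sketch of the scikit-learn style wrapper defined above; `client`, `X` and `y`
# are placeholder dask objects as in the earlier sketches.
def _example_dask_regressor(client: "distributed.Client", X: Any, y: Any) -> None:
    # Illustrative only; never called from this module.
    import xgboost as xgb
    reg = xgb.dask.DaskXGBRegressor(n_estimators=10, tree_method="hist")
    reg.client = client  # optional; falls back to the current dask client
    reg.fit(X, y, eval_set=[(X, y)])
    print(reg.predict(X).compute()[:5])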
@xgboost_model_doc(
"Implementation of the scikit-learn API for XGBoost classification.",
["estimators", "model"],
)
class DaskXGBClassifier(DaskScikitLearnBase, XGBClassifierBase):
# pylint: disable=missing-class-docstring
async def _fit_async(
self,
X: _DaskCollection,
y: _DaskCollection,
sample_weight: Optional[_DaskCollection],
base_margin: Optional[_DaskCollection],
eval_set: Optional[Sequence[Tuple[_DaskCollection, _DaskCollection]]],
eval_metric: Optional[Union[str, Sequence[str], Metric]],
sample_weight_eval_set: Optional[Sequence[_DaskCollection]],
base_margin_eval_set: Optional[Sequence[_DaskCollection]],
early_stopping_rounds: Optional[int],
verbose: bool,
xgb_model: Optional[Union[Booster, XGBModel]],
feature_weights: Optional[_DaskCollection],
callbacks: Optional[Sequence[TrainingCallback]],
) -> "DaskXGBClassifier":
params = self.get_xgb_params()
dtrain, evals = await _async_wrap_evaluation_matrices(
self.client,
X=X,
y=y,
group=None,
qid=None,
sample_weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
eval_set=eval_set,
sample_weight_eval_set=sample_weight_eval_set,
base_margin_eval_set=base_margin_eval_set,
eval_group=None,
eval_qid=None,
missing=self.missing,
enable_categorical=self.enable_categorical,
feature_types=self.feature_types,
)
# pylint: disable=attribute-defined-outside-init
if isinstance(y, (da.Array)):
self.classes_ = await self.client.compute(da.unique(y))
else:
self.classes_ = await self.client.compute(y.drop_duplicates())
self.n_classes_ = len(self.classes_)
if self.n_classes_ > 2:
params["objective"] = "multi:softprob"
params["num_class"] = self.n_classes_
else:
params["objective"] = "binary:logistic"
if callable(self.objective):
obj: Optional[Callable] = _objective_decorator(self.objective)
else:
obj = None
model, metric, params, early_stopping_rounds, callbacks = self._configure_fit(
xgb_model, eval_metric, params, early_stopping_rounds, callbacks
)
results = await self.client.sync(
_train_async,
asynchronous=True,
client=self.client,
global_config=config.get_config(),
dconfig=_get_dask_config(),
params=params,
dtrain=dtrain,
num_boost_round=self.get_num_boosting_rounds(),
evals=evals,
obj=obj,
feval=None,
custom_metric=metric,
verbose_eval=verbose,
early_stopping_rounds=early_stopping_rounds,
callbacks=callbacks,
xgb_model=model,
)
self._Booster = results["booster"]
if not callable(self.objective):
self.objective = params["objective"]
self._set_evaluation_result(results["history"])
return self
# pylint: disable=unused-argument
def fit(
self,
X: _DaskCollection,
y: _DaskCollection,
*,
sample_weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
eval_set: Optional[Sequence[Tuple[_DaskCollection, _DaskCollection]]] = None,
eval_metric: Optional[Union[str, Sequence[str], Callable]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: bool = True,
xgb_model: Optional[Union[Booster, XGBModel]] = None,
sample_weight_eval_set: Optional[Sequence[_DaskCollection]] = None,
base_margin_eval_set: Optional[Sequence[_DaskCollection]] = None,
feature_weights: Optional[_DaskCollection] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None,
) -> "DaskXGBClassifier":
_assert_dask_support()
args = {k: v for k, v in locals().items() if k not in ("self", "__class__")}
return self._client_sync(self._fit_async, **args)
async def _predict_proba_async(
self,
X: _DaskCollection,
validate_features: bool,
base_margin: Optional[_DaskCollection],
iteration_range: Optional[Tuple[int, int]],
) -> _DaskCollection:
if self.objective == "multi:softmax":
raise ValueError(
"multi:softmax doesn't support `predict_proba`. "
"Switch to `multi:softproba` instead"
)
predts = await super()._predict_async(
data=X,
output_margin=False,
validate_features=validate_features,
base_margin=base_margin,
iteration_range=iteration_range,
)
vstack = update_wrapper(
partial(da.vstack, allow_unknown_chunksizes=True), da.vstack
)
return _cls_predict_proba(getattr(self, "n_classes_", 0), predts, vstack)
# pylint: disable=missing-function-docstring
def predict_proba(
self,
X: _DaskCollection,
ntree_limit: Optional[int] = None,
validate_features: bool = True,
base_margin: Optional[_DaskCollection] = None,
iteration_range: Optional[Tuple[int, int]] = None,
) -> Any:
_assert_dask_support()
msg = "`ntree_limit` is not supported on dask, use `iteration_range` instead."
assert ntree_limit is None, msg
return self._client_sync(
self._predict_proba_async,
X=X,
validate_features=validate_features,
base_margin=base_margin,
iteration_range=iteration_range,
)
predict_proba.__doc__ = XGBClassifier.predict_proba.__doc__
async def _predict_async(
self,
data: _DaskCollection,
output_margin: bool,
validate_features: bool,
base_margin: Optional[_DaskCollection],
iteration_range: Optional[Tuple[int, int]],
) -> _DaskCollection:
pred_probs = await super()._predict_async(
data, output_margin, validate_features, base_margin, iteration_range
)
if output_margin:
return pred_probs
if len(pred_probs.shape) == 1:
preds = (pred_probs > 0.5).astype(int)
else:
assert len(pred_probs.shape) == 2
assert isinstance(pred_probs, da.Array)
# when using da.argmax directly, dask will construct a numpy based return
# array, which runs into error when computing GPU based prediction.
def _argmax(x: Any) -> Any:
return x.argmax(axis=1)
preds = da.map_blocks(_argmax, pred_probs, drop_axis=1)
return preds
@xgboost_model_doc(
"""Implementation of the Scikit-Learn API for XGBoost Ranking.
.. versionadded:: 1.4.0
""",
["estimators", "model"],
end_note="""
.. note::
For dask implementation, group is not supported, use qid instead.
""",
)
class DaskXGBRanker(DaskScikitLearnBase, XGBRankerMixIn):
@_deprecate_positional_args
def __init__(self, *, objective: str = "rank:pairwise", **kwargs: Any):
if callable(objective):
raise ValueError("Custom objective function not supported by XGBRanker.")
super().__init__(objective=objective, kwargs=kwargs)
async def _fit_async(
self,
X: _DaskCollection,
y: _DaskCollection,
group: Optional[_DaskCollection],
qid: Optional[_DaskCollection],
sample_weight: Optional[_DaskCollection],
base_margin: Optional[_DaskCollection],
eval_set: Optional[Sequence[Tuple[_DaskCollection, _DaskCollection]]],
sample_weight_eval_set: Optional[Sequence[_DaskCollection]],
base_margin_eval_set: Optional[Sequence[_DaskCollection]],
eval_group: Optional[Sequence[_DaskCollection]],
eval_qid: Optional[Sequence[_DaskCollection]],
eval_metric: Optional[Union[str, Sequence[str], Metric]],
early_stopping_rounds: Optional[int],
verbose: bool,
xgb_model: Optional[Union[XGBModel, Booster]],
feature_weights: Optional[_DaskCollection],
callbacks: Optional[Sequence[TrainingCallback]],
) -> "DaskXGBRanker":
msg = "Use `qid` instead of `group` on dask interface."
if not (group is None and eval_group is None):
raise ValueError(msg)
if qid is None:
raise ValueError("`qid` is required for ranking.")
params = self.get_xgb_params()
dtrain, evals = await _async_wrap_evaluation_matrices(
self.client,
X=X,
y=y,
group=None,
qid=qid,
sample_weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
eval_set=eval_set,
sample_weight_eval_set=sample_weight_eval_set,
base_margin_eval_set=base_margin_eval_set,
eval_group=None,
eval_qid=eval_qid,
missing=self.missing,
enable_categorical=self.enable_categorical,
feature_types=self.feature_types,
)
if eval_metric is not None:
if callable(eval_metric):
raise ValueError(
"Custom evaluation metric is not yet supported for XGBRanker."
)
model, metric, params, early_stopping_rounds, callbacks = self._configure_fit(
xgb_model, eval_metric, params, early_stopping_rounds, callbacks
)
results = await self.client.sync(
_train_async,
asynchronous=True,
client=self.client,
global_config=config.get_config(),
dconfig=_get_dask_config(),
params=params,
dtrain=dtrain,
num_boost_round=self.get_num_boosting_rounds(),
evals=evals,
obj=None,
feval=None,
custom_metric=metric,
verbose_eval=verbose,
early_stopping_rounds=early_stopping_rounds,
callbacks=callbacks,
xgb_model=model,
)
self._Booster = results["booster"]
self.evals_result_ = results["history"]
return self
# pylint: disable=unused-argument, arguments-differ
@_deprecate_positional_args
def fit(
self,
X: _DaskCollection,
y: _DaskCollection,
*,
group: Optional[_DaskCollection] = None,
qid: Optional[_DaskCollection] = None,
sample_weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
eval_set: Optional[Sequence[Tuple[_DaskCollection, _DaskCollection]]] = None,
eval_group: Optional[Sequence[_DaskCollection]] = None,
eval_qid: Optional[Sequence[_DaskCollection]] = None,
eval_metric: Optional[Union[str, Sequence[str], Callable]] = None,
early_stopping_rounds: int = None,
verbose: bool = False,
xgb_model: Optional[Union[XGBModel, Booster]] = None,
sample_weight_eval_set: Optional[Sequence[_DaskCollection]] = None,
base_margin_eval_set: Optional[Sequence[_DaskCollection]] = None,
feature_weights: Optional[_DaskCollection] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None,
) -> "DaskXGBRanker":
_assert_dask_support()
args = {k: v for k, v in locals().items() if k not in ("self", "__class__")}
return self._client_sync(self._fit_async, **args)
# FIXME(trivialfis): arguments differ due to additional parameters like group and qid.
fit.__doc__ = XGBRanker.fit.__doc__
@xgboost_model_doc(
"""Implementation of the Scikit-Learn API for XGBoost Random Forest Regressor.
.. versionadded:: 1.4.0
""",
["model", "objective"],
extra_parameters="""
n_estimators : int
Number of trees in random forest to fit.
""",
)
class DaskXGBRFRegressor(DaskXGBRegressor):
@_deprecate_positional_args
def __init__(
self,
*,
learning_rate: Optional[float] = 1,
subsample: Optional[float] = 0.8,
colsample_bynode: Optional[float] = 0.8,
reg_lambda: Optional[float] = 1e-5,
**kwargs: Any,
) -> None:
super().__init__(
learning_rate=learning_rate,
subsample=subsample,
colsample_bynode=colsample_bynode,
reg_lambda=reg_lambda,
**kwargs,
)
def get_xgb_params(self) -> Dict[str, Any]:
params = super().get_xgb_params()
params["num_parallel_tree"] = self.n_estimators
return params
def get_num_boosting_rounds(self) -> int:
return 1
# pylint: disable=unused-argument
def fit(
self,
X: _DaskCollection,
y: _DaskCollection,
*,
sample_weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
eval_set: Optional[Sequence[Tuple[_DaskCollection, _DaskCollection]]] = None,
eval_metric: Optional[Union[str, Sequence[str], Callable]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: bool = True,
xgb_model: Optional[Union[Booster, XGBModel]] = None,
sample_weight_eval_set: Optional[Sequence[_DaskCollection]] = None,
base_margin_eval_set: Optional[Sequence[_DaskCollection]] = None,
feature_weights: Optional[_DaskCollection] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None,
) -> "DaskXGBRFRegressor":
_assert_dask_support()
args = {k: v for k, v in locals().items() if k not in ("self", "__class__")}
_check_rf_callback(early_stopping_rounds, callbacks)
super().fit(**args)
return self
@xgboost_model_doc(
"""Implementation of the Scikit-Learn API for XGBoost Random Forest Classifier.
.. versionadded:: 1.4.0
""",
["model", "objective"],
extra_parameters="""
n_estimators : int
Number of trees in random forest to fit.
""",
)
class DaskXGBRFClassifier(DaskXGBClassifier):
@_deprecate_positional_args
def __init__(
self,
*,
learning_rate: Optional[float] = 1,
subsample: Optional[float] = 0.8,
colsample_bynode: Optional[float] = 0.8,
reg_lambda: Optional[float] = 1e-5,
**kwargs: Any,
) -> None:
super().__init__(
learning_rate=learning_rate,
subsample=subsample,
colsample_bynode=colsample_bynode,
reg_lambda=reg_lambda,
**kwargs,
)
def get_xgb_params(self) -> Dict[str, Any]:
params = super().get_xgb_params()
params["num_parallel_tree"] = self.n_estimators
return params
def get_num_boosting_rounds(self) -> int:
return 1
# pylint: disable=unused-argument
def fit(
self,
X: _DaskCollection,
y: _DaskCollection,
*,
sample_weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
eval_set: Optional[Sequence[Tuple[_DaskCollection, _DaskCollection]]] = None,
eval_metric: Optional[Union[str, Sequence[str], Callable]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: bool = True,
xgb_model: Optional[Union[Booster, XGBModel]] = None,
sample_weight_eval_set: Optional[Sequence[_DaskCollection]] = None,
base_margin_eval_set: Optional[Sequence[_DaskCollection]] = None,
feature_weights: Optional[_DaskCollection] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None,
) -> "DaskXGBRFClassifier":
_assert_dask_support()
args = {k: v for k, v in locals().items() if k not in ("self", "__class__")}
_check_rf_callback(early_stopping_rounds, callbacks)
super().fit(**args)
return self
|
main.py
|
from evolutionarystrategy import EvolutionaryStrategy
from fitness import Fitness
from model import Model
import sys
import multiprocessing as mp
import numpy as np
import csv
import torch
def generate_epsilon(seed, model):
torch.manual_seed(seed)
epsilon = {}
for key, shape in model.shape().items():
if model.params[key].type() == "torch.FloatTensor":
epsilon[key] = torch.randn(shape).float()
elif model.params[key].type() == "torch.LongTensor":
epsilon[key] = torch.randn(shape).long()
else:
epsilon[key] = torch.randn(shape)
return epsilon
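# generate_epsilon rebuilds the exact noise a worker sampled from nothing but its seed, so
# worker processes only need to return (reward, seed) pairs. Below is a hedged sketch of the
# canonical evolution-strategies update this enables; the project's real update lives in
# Model.update_params and may differ (e.g. in its noise scale sigma).
def example_es_update(params, epsilons, rewards, learning_rate, sigma=0.1):
    """Illustrative only: params is assumed to be a dict of torch tensors."""
    rewards = np.asarray(rewards, dtype=np.float32)
    # Normalise rewards so the step size is insensitive to their absolute scale.
    rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-8)
    for key in params:
        step = sum(float(r) * eps[key].float() for r, eps in zip(rewards, epsilons))
        params[key] = params[key] + learning_rate / (len(rewards) * sigma) * step
    return params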
if __name__ == '__main__':
impact = {
'imp_team': [0.01],
'imp_enemies': [0.1,0.1,0.1],
'imp_powerup': [0.02]
}
evo_strat = EvolutionaryStrategy(Model, Fitness, impact, populationsize=30, learning_rate = 0.01)
rewardcsv = open("Rewards.csv", "w")
winratecsv = open("Winrate.csv", "w")
rewardcsv.close()
winratecsv.close()
for i in range(100):
manager = mp.Manager()
output = manager.Queue()
processes = [mp.Process(target=evo_strat.evolution, args=(x, output)) for x in range(evo_strat.populationsize)]
for p in processes:
p.start()
for p in processes:
p.join()
results = [output.get() for p in processes]
rewards = [r[0] for r in results]
        epsilons = []
        # Rebuild each worker's noise tensor from the seed it returned.
        for r in results:
            epsilons.append(generate_epsilon(r[1], evo_strat.model))
evo_strat.model.update_params(epsilons, rewards, evo_strat.learning_rate)
print("Done with iteration {}".format(i))
        if i % 10 == 0:
winrate = evo_strat.play_game(10)
print("Average win rate over 10 games {}".format(winrate))
rewardcsv = open("Rewards.csv", "a")
winratecsv = open("Winrate.csv", "a")
with rewardcsv:
writer = csv.writer(rewardcsv)
writer.writerow(rewards)
with winratecsv:
writer = csv.writer(winratecsv)
writer.writerow([winrate])
rewardcsv.close()
winratecsv.close()
torch.save(evo_strat.model.policy.state_dict(),'Model.pt')
|
io_test_view.py
|
# External Dependencies
from threading import Thread
from pyzbar import pyzbar
from pyzbar.pyzbar import ZBarSymbol
import time
# Internal file class dependencies
from . import View, Fonts  # NOTE: Fonts assumed to be exported from this views package (needed by Fonts.get_font below)
from seedsigner.helpers import B
class IOTestView(View):
def __init__(self) -> None:
View.__init__(self)
self.redraw = False
self.redraw_complete = False
self.qr_text = "Scan ANY QR Code"
self.exit = False
def display_io_test_screen(self):
# display loading screen
self.draw_modal(["Initializing I/O Test"])
print("Initializing I/O Test")
self.qr_text = "Scan ANY QR Code"
self.redraw = False
self.redraw_complete = False
self.exit = False
try:
self.controller.get_instance().camera.start_video_stream_mode()
t = Thread(target=self.qr_loop)
t.start()
        except Exception:
self.qr_text = "No Camera"
self.controller.get_instance().camera.stop_video_stream_mode()
while True:
self.draw_io_screen()
input = self.buttons.wait_for([B.KEY_UP, B.KEY_DOWN, B.KEY_PRESS, B.KEY_RIGHT, B.KEY_LEFT, B.KEY1, B.KEY2, B.KEY3], False)
if input == B.KEY_UP:
ret_val = self.up_button()
elif input == B.KEY_DOWN:
ret_val = self.down_button()
elif input == B.KEY_RIGHT:
ret_val = self.right_button()
elif input == B.KEY_LEFT:
ret_val = self.left_button()
elif input == B.KEY_PRESS:
ret_val = self.press_button()
elif input == B.KEY1:
ret_val = self.a_button()
elif input == B.KEY2:
ret_val = self.b_button()
elif input == B.KEY3:
ret_val = self.c_button()
return True
def qr_loop(self):
while True:
frame = self.controller.get_instance().camera.read_video_stream()
if frame is not None:
barcodes = pyzbar.decode(frame, symbols=[ZBarSymbol.QRCODE])
if len(barcodes) > 0:
self.draw_scan_detected()
time.sleep(0.05)
if self.controller.get_instance().camera._video_stream is None:
break
if self.exit == True:
break
def draw_io_screen(self):
self.redraw_complete = False
self.redraw = False
self.draw.rectangle((0,0,self.renderer.canvas_width, self.renderer.canvas_height), outline=0, fill=0)
self.draw.text((45, 5), "Input/Output Check:", fill=View.color, font=Fonts.get_font("Assistant-Medium", 18))
self.draw.polygon([(61, 89), (80, 46), (99, 89)], outline=View.color, fill=0)
self.draw.polygon([(51, 100), (8, 119), (51, 138)], outline=View.color, fill=0)
self.draw.polygon([(109, 100), (152, 119), (109, 138)], outline=View.color, fill=0)
self.draw.polygon([(61, 151), (80, 193), (99, 151)], outline=View.color, fill=0)
self.draw.ellipse([(61, 99), (99, 141)], outline=View.color, fill=0)
self.draw.ellipse([(198, 40), (238, 80)], outline=View.color, fill=0)
self.draw.ellipse([(198, 95), (238, 135)], outline=View.color, fill=0)
self.draw.text((200, 160), "EXIT", fill=View.color, font=Fonts.get_font("Assistant-Medium", 18))
self.draw.rectangle((30, 205, 210, 235), outline=View.color, fill="BLACK")
tw, th = self.draw.textsize(self.qr_text, font=Fonts.get_font("Assistant-Medium", 22))
self.draw.text(((240 - tw) / 2, 205), self.qr_text, fill=View.color, font=Fonts.get_font("Assistant-Medium", 22))
self.renderer.show_image()
self.redraw_complete = True
def a_button(self):
if self.redraw == False and self.redraw_complete == True:
self.draw.ellipse([(198, 40), (238, 80)], outline=View.color, fill=View.color)
self.renderer.show_image()
self.redraw = True
def b_button(self):
if self.redraw == False and self.redraw_complete == True:
self.draw.ellipse([(198, 95), (238, 135)], outline=View.color, fill=View.color)
self.renderer.show_image()
self.redraw = True
def c_button(self):
self.exit = True
self.controller.get_instance().camera.stop_video_stream_mode()
return
def up_button(self):
if self.redraw == False and self.redraw_complete == True:
self.draw.polygon([(61, 89), (80, 46), (99, 89)], outline=View.color, fill=View.color)
self.renderer.show_image()
self.redraw = True
def down_button(self):
if self.redraw == False and self.redraw_complete == True:
self.draw.polygon([(61, 151), (80, 193), (99, 151)], outline=View.color, fill=View.color)
self.renderer.show_image()
self.redraw = True
def left_button(self):
if self.redraw == False and self.redraw_complete == True:
self.draw.polygon([(51, 100), (8, 119), (51, 138)], outline=View.color, fill=View.color)
self.renderer.show_image()
self.redraw = True
def right_button(self):
if self.redraw == False and self.redraw_complete == True:
self.draw.polygon([(109, 100), (152, 119), (109, 138)], outline=View.color, fill=View.color)
self.renderer.show_image()
self.redraw = True
def press_button(self):
if self.redraw == False and self.redraw_complete == True:
self.draw.ellipse([(61, 99), (99, 141)], outline=View.color, fill=View.color)
self.renderer.show_image()
self.redraw = True
def draw_scan_detected(self):
self.qr_text = "QR Scanned"
if self.redraw == False and self.redraw_complete == True:
self.draw.rectangle((30, 205, 210, 235), outline=View.color, fill=View.color)
tw, th = self.draw.textsize(self.qr_text, font=Fonts.get_font("Assistant-Medium", 22))
self.draw.text(((240 - tw) / 2, 205), self.qr_text, fill="BLACK", font=Fonts.get_font("Assistant-Medium", 22))
self.renderer.show_image()
self.redraw = True
|
local_games.py
|
import logging as log
import os
import subprocess
import time
from pathlib import Path
from threading import Thread, Lock
from consts import Platform, SYSTEM, WINDOWS_UNINSTALL_LOCATION, LS_REGISTER
from definitions import BlizzardGame, ClassicGame, Blizzard
from pathfinder import PathFinder
from psutil import Process, AccessDenied
if SYSTEM == Platform.WINDOWS:
import winreg
pathfinder = PathFinder(SYSTEM)
class InstalledGame(object):
def __init__(self, info: BlizzardGame, uninstall_tag: str, version: str, last_played: str, install_path: str,
playable: bool, installed: bool = False):
self.info = info
self.uninstall_tag = uninstall_tag
self.version = version
self.last_played = last_played
self.install_path = install_path
self.playable = playable
self.installed = installed
self.execs = pathfinder.find_executables(self.install_path)
self._processes = set()
@property
def has_galaxy_installed_state(self) -> bool:
"""Indicates when Play button should be available in Galaxy"""
return self.playable or self.installed
def add_process(self, process: Process):
try:
if process.exe() in self.execs:
self._processes.add(process)
else:
raise ValueError(f"The process exe [{process.exe()}] doesn't match with the game execs: {self.execs}")
except AccessDenied:
if isinstance(self.info, ClassicGame):
if self.info.exe in process.name():
self._processes.add(process)
else:
raise ValueError(
f"The process name [{process.name()}] doesn't match with the game exe: {self.info.exe}")
def is_running(self):
for process in self._processes:
if process.is_running():
return True
else:
self._processes = set()
return False
def wait_until_game_stops(self):
while self.is_running():
time.sleep(0.5)
class LocalGames():
def __init__(self):
self.installed_classic_games_lock = Lock()
self.installed_classic_games = {}
self.parsed_classics = False
self.installed_battlenet_games = {}
self.installed_battlenet_games_lock = Lock()
self.parsed_battlenet = False
self._classic_games_thread = None
self._battlenet_games_thread = None
def _add_classic_game(self, game, key):
if game.registry_path:
try:
with winreg.OpenKey(key, game.registry_path) as game_key:
log.debug(f"Found classic game registry entry! {game.registry_path}")
install_path = winreg.QueryValueEx(game_key, game.registry_installation_key)[0]
if install_path.endswith('.exe'):
install_path = Path(install_path).parent
uninstall_path = winreg.QueryValueEx(game_key, "UninstallString")[0]
if os.path.exists(install_path):
log.debug(f"Found classic game is installed! {game.registry_path}")
return InstalledGame(
game,
uninstall_path,
'1.0',
'',
install_path,
True,
True,
)
except OSError:
return None
return None
def _find_classic_games(self):
classic_games = {}
log.debug("Looking for classic games")
if SYSTEM == Platform.WINDOWS:
try:
reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
with winreg.OpenKey(reg, WINDOWS_UNINSTALL_LOCATION) as key:
for game in Blizzard.CLASSIC_GAMES:
log.debug(f"Checking if {game} is in registry ")
installed_game = self._add_classic_game(game, key)
if installed_game:
classic_games[game.uid] = installed_game
except OSError as e:
log.exception(f"Exception while looking for installed classic games {e}")
else:
            proc = subprocess.run([LS_REGISTER, "-dump"], encoding='utf-8', stdout=subprocess.PIPE)
for game in Blizzard.CLASSIC_GAMES:
if game.bundle_id:
if game.bundle_id in proc.stdout:
classic_games[game.uid] = InstalledGame(
game,
'',
'1.0',
'',
'',
True,
True,
)
self.installed_classic_games_lock.acquire()
self.installed_classic_games = classic_games
self.installed_classic_games_lock.release()
if not self.parsed_classics:
self.parsed_classics = True
def parse_local_battlenet_games(self, database_parser_games, config_parser_games):
"""Game is considered as installed when present in both config and product.db"""
# give threads 4 seconds to finish
join_timeout = 4
log.info(f"Games found in db {database_parser_games}")
log.info(f"Games found in config {config_parser_games}")
try:
            if not self._battlenet_games_thread or not self._battlenet_games_thread.is_alive():
self._battlenet_games_thread = Thread(target=self._get_battlenet_installed_games, daemon=True, args=[database_parser_games, config_parser_games])
self._battlenet_games_thread.start()
log.info("Started battlenet games thread")
except Exception as e:
log.exception(str(e))
finally:
self._battlenet_games_thread.join(join_timeout)
async def parse_local_classic_games(self):
# give threads 4 seconds to finish
join_timeout = 4
        if not self._classic_games_thread or not self._classic_games_thread.is_alive():
self._classic_games_thread = Thread(target=self._find_classic_games, daemon=True)
self._classic_games_thread.start()
log.info("Started classic games thread")
self._classic_games_thread.join(join_timeout)
def _get_battlenet_installed_games(self, database_parser_games, config_parser_games):
def _add_battlenet_game(config_game, db_game):
if config_game.uninstall_tag != db_game.uninstall_tag:
return None
try:
blizzard_game = Blizzard[config_game.uid]
except KeyError:
log.warning(f'[{config_game.uid}] is not known blizzard game. Skipping')
return None
try:
log.info(f"Adding {blizzard_game.uid} {blizzard_game.name} to installed games")
return InstalledGame(
blizzard_game,
config_game.uninstall_tag,
db_game.version,
config_game.last_played,
db_game.install_path,
db_game.playable,
db_game.installed,
)
except FileNotFoundError as e:
log.warning(str(e) + '. Probably outdated product.db after uninstall. Skipping')
return None
games = {}
for db_game in database_parser_games:
for config_game in config_parser_games:
installed_game = _add_battlenet_game(config_game, db_game)
if installed_game:
games[installed_game.info.uid] = installed_game
self.installed_battlenet_games_lock.acquire()
self.installed_battlenet_games = games
self.installed_battlenet_games_lock.release()
if not self.parsed_battlenet:
self.parsed_battlenet = True
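# --- Illustrative usage sketch (not part of the original plugin code; the event
# --- loop wiring of the real Galaxy integration is assumed away here):
# import asyncio
#
# async def _print_installed_classics():
#     local_games = LocalGames()
#     await local_games.parse_local_classic_games()
#     for uid, game in local_games.installed_classic_games.items():
#         print(uid, game.install_path)
#
# asyncio.run(_print_installed_classics())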
|
fetchVioDetail.py
|
#!/usr/bin/env python
##
# Copyright (C) 2016 University of Southern California and
# Nan Hua
#
# Authors: Nan Hua and Hanjun Shin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import numpy as np
import multiprocessing
import re
import argparse
import alab.files
import alab.matrix
import alab.utils
import alab.plots
__author__ = "Nan Hua"
__credits__ = ["Hanjun Shin"]
__license__ = "GPL"
__version__ = "0.0.1"
__email__ = "nhua@usc.edu"
def getVios(copystart,copyend,queue, struct_dir, prob):
for i in range(copystart,copyend):
sts = alab.files.locusStructures(struct_dir+'/copy'+str(i)+'.hms',[prob])
logs = sts[-1].log
        vios = re.findall(r': (\d+.\d+) k = (\d+.\d+) (\d+.\d+)', logs)
detail = []
for v in vios:
dist,k,score = float(v[0]),float(v[1]),float(v[2])
detail.append(1+(2*score/k)**0.5/dist)
queue.put(detail)
#-
return 0
def listener(queue, prob, output_file):
detail = []
i=0
while True:
rs = queue.get()
if rs == 'kill':
break
detail += rs
i+=1
#if i % 1000 == 0:
# print "%.3f %%" % (i*100.0/nstruct)
#-
detail = np.array(detail)
#np.savetxt('%s_viodetail.txt'%(prob),detail,fmt='%f')
    v = np.percentile(detail,99) # 99th percentile
cutoff = np.percentile(detail,99.9)
vio = detail[detail<cutoff]
alab.plots.histogram(output_file, vio, 300,format='pdf',xlab='Violation Ratio',ylab='Frequency',histtype='stepfilled',color='g',line=v)
return 0
def plotVio(prob, nstruct, struct_dir, output_file):
pid = 10
record = []
manager = multiprocessing.Manager()
queue = manager.Queue()
watcher = multiprocessing.Process(target = listener,args=(queue, prob, output_file))
watcher.start()
for k in range(pid):
        start = k * (nstruct // pid)    # integer division so range() receives ints on Python 3
        end = (k + 1) * (nstruct // pid)
process = multiprocessing.Process(target=getVios,args=(start,end,queue, struct_dir, prob))
process.start()
record.append(process)
for process in record:
process.join()
queue.put('kill')
watcher.join()
if __name__=='__main__':
    parser = argparse.ArgumentParser(description="fetchVioDetail.py")
    parser.add_argument('-p', '--prob', type=str, required=True)        # probability level label used in the copy<i>.hms files
    parser.add_argument('-n', '--nstruct', type=int, required=True)     # total number of structures
    parser.add_argument('-s', '--struct_dir', type=str, required=True)  # directory containing the copy<i>.hms structure files
    parser.add_argument('-o', '--output_file', type=str, required=True) # output histogram file (pdf)
args = parser.parse_args()
plotVio(args.prob, args.nstruct, args.struct_dir, args.output_file)
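# Example invocation (all paths and values below are placeholders, not taken
# from the original pipeline):
#   python fetchVioDetail.py -p 0.1 -n 10000 -s ./structures -o vio_ratio_hist.pdf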
|
progress.py
|
"""Utilities for progress tracking and display to the user."""
from __future__ import absolute_import, division
from datetime import timedelta
import importlib
import os
import sys
import threading
import time
import uuid
import warnings
import numpy as np
from .compat import escape
from .stdlib import get_terminal_size
from .ipython import check_ipy_version, get_ipython
from ..exceptions import ValidationError
from ..rc import rc
if get_ipython() is not None:
from IPython.display import display, Javascript
class MemoryLeakWarning(UserWarning):
pass
warnings.filterwarnings('once', category=MemoryLeakWarning)
def timestamp2timedelta(timestamp):
if timestamp == -1:
return "Unknown"
return timedelta(seconds=np.ceil(timestamp))
def _load_class(name):
mod_name, cls_name = name.rsplit('.', 1)
mod = importlib.import_module(mod_name)
return getattr(mod, cls_name)
class Progress(object):
"""Stores and tracks information about the progress of some process.
This class is to be used as part of a ``with`` statement. Use ``step()`` to
update the progress.
Parameters
----------
max_steps : int
The total number of calculation steps of the process.
name_during : str, optional
Short description of the task to be used while it is running.
name_after : str, optional
Short description of the task to be used after it has
finished. Defaults to ``name_during``.
Attributes
----------
max_steps : int, optional
The total number of calculation steps of the process, if known.
name_after : str
Name of the task to be used after it has finished.
name_during : str
Name of the task to be used while it is running.
steps : int
Number of completed steps.
success : bool or None
Whether the process finished successfully. ``None`` if the process
did not finish yet.
time_end : float
Time stamp of the time the process was finished or aborted.
time_start : float
Time stamp of the time the process was started.
Examples
--------
>>> max_steps = 10
>>> with Progress(max_steps=max_steps) as progress:
... for i in range(max_steps):
... # do something
... progress.step()
"""
def __init__(self, name_during='', name_after=None, max_steps=None):
if max_steps is not None and max_steps <= 0:
raise ValidationError("must be at least 1 (got %d)"
% (max_steps,), attr="max_steps")
self.n_steps = 0
self.max_steps = max_steps
self.name_during = name_during
if name_after is None:
name_after = name_during
self.name_after = name_after
self.time_start = self.time_end = time.time()
self.finished = False
self.success = None
@property
def progress(self):
"""The current progress as a number from 0 to 1 (inclusive).
Returns
-------
float
"""
if self.max_steps is None:
return 0.
return min(1.0, self.n_steps / self.max_steps)
def elapsed_seconds(self):
"""The number of seconds passed since entering the ``with`` statement.
Returns
-------
float
"""
if self.finished:
return self.time_end - self.time_start
else:
return time.time() - self.time_start
def eta(self):
"""The estimated number of seconds until the process is finished.
Stands for estimated time of arrival (ETA).
If no estimate is available -1 will be returned.
Returns
-------
float
"""
if self.progress > 0.:
return (
(1. - self.progress) * self.elapsed_seconds() / self.progress)
else:
return -1
def __enter__(self):
self.finished = False
self.success = None
self.n_steps = 0
self.time_start = time.time()
return self
def __exit__(self, exc_type, dummy_exc_value, dummy_traceback):
self.success = exc_type is None
if self.success and self.max_steps is not None:
self.n_steps = self.max_steps
self.time_end = time.time()
self.finished = True
def step(self, n=1):
"""Advances the progress.
Parameters
----------
n : int
Number of steps to advance the progress by.
"""
self.n_steps += n
class ProgressBar(object):
"""Visualizes the progress of a process.
    This is an abstract base class that progress bar classes should inherit
    from. Progress bars should visually display the progress in some way.
"""
def update(self, progress):
"""Updates the displayed progress.
Parameters
----------
progress : Progress
The progress information to display.
"""
raise NotImplementedError()
def close(self):
"""Closes the progress bar.
        Indicates that no further updates will be made.
"""
pass
class NoProgressBar(ProgressBar):
"""A progress bar that does not display anything.
Helpful in headless situations or when using Nengo as a library.
"""
def update(self, progress):
pass
class TerminalProgressBar(ProgressBar):
"""A progress bar that is displayed as ASCII output on ``stdout``."""
def update(self, progress):
if progress.finished:
line = self._get_finished_line(progress)
elif progress.max_steps is None:
line = self._get_unknown_progress_line(progress)
else:
line = self._get_in_progress_line(progress)
sys.stdout.write(line)
sys.stdout.flush()
def _get_in_progress_line(self, progress):
line = "[{{}}] ETA: {eta}".format(
eta=timestamp2timedelta(progress.eta()))
percent_str = " {}... {}% ".format(
progress.name_during, int(100 * progress.progress))
width, _ = get_terminal_size()
progress_width = max(0, width - len(line))
progress_str = (
int(progress_width * progress.progress) * "#").ljust(
progress_width)
percent_pos = (len(progress_str) - len(percent_str)) // 2
if percent_pos > 0:
progress_str = (
progress_str[:percent_pos] + percent_str
+ progress_str[percent_pos + len(percent_str):])
return '\r' + line.format(progress_str)
def _get_unknown_progress_line(self, progress):
"""Generates a progress line with continuously moving marker.
This is to indicate processing while not knowing how far along we
progressed with the processing.
"""
duration = progress.elapsed_seconds()
line = "[{{}}] duration: {duration}".format(
duration=timestamp2timedelta(duration))
text = " {}... ".format(progress.name_during)
width, _ = get_terminal_size()
marker = '>>>>'
progress_width = max(0, width - len(line) + 2)
index_width = progress_width + len(marker)
i = int(10. * duration) % (index_width + 1)
progress_str = (' ' * i) + marker + (' ' * (index_width - i))
progress_str = progress_str[len(marker):-len(marker)]
text_pos = (len(progress_str) - len(text)) // 2
progress_str = (
progress_str[:text_pos] + text
+ progress_str[text_pos + len(text):])
return '\r' + line.format(progress_str)
def _get_finished_line(self, progress):
width, _ = get_terminal_size()
line = "{} finished in {}.".format(
progress.name_after,
timestamp2timedelta(progress.elapsed_seconds())).ljust(width)
return '\r' + line
def close(self):
sys.stdout.write(os.linesep)
sys.stdout.flush()
class VdomProgressBar(ProgressBar):
"""A progress bar using a virtual DOM representation.
This HTML representation can be used in Jupyter lab (>=0.32) environments.
"""
def __init__(self):
super(VdomProgressBar, self).__init__()
self._uuid = uuid.uuid4()
self._handle = None
self.progress = None
def update(self, progress):
self.progress = progress
if self._handle is None:
self._handle = display(self, display_id=True)
else:
self._handle.update(self)
def _repr_mimebundle_(self, include, exclude, **kwargs):
return {
'application/vdom.v1+json': self._get_vdom(self.progress)
}
def _get_vdom(self, progress):
return {
'tagName': 'div',
'attributes': {
'id': str(self._uuid),
'style': {
'width': '100%',
'boxSizing': 'border-box',
'border': '1px solid #cfcfcf',
'borderRadius': '4px',
'textAlign': 'center',
'position': 'relative',
},
},
'children': [{
'tagName': 'div',
'attributes': {
'class': 'pb-text',
'style': {'position': 'absolute', 'width': '100%'},
},
'children': [self._get_text(self.progress)],
}, {
'tagName': 'div',
'attributes': {
'class': 'pb-fill',
'style': self._get_fill_style(self.progress),
},
'children': [{
'tagName': 'style',
'attributes': {
'type': 'text/css',
'scoped': 'scoped',
},
'children': ['''
@keyframes pb-fill-anim {
0% { background-position: 0 0; }
100% { background-position: 100px 0; }
                    }'''],
}, "\u00A0" # non-breaking space
],
}],
}
def _get_text(self, progress):
if progress is None:
text = ''
elif progress.finished:
text = "{} finished in {}.".format(
escape(progress.name_after),
timestamp2timedelta(progress.elapsed_seconds()))
elif progress.max_steps is None:
text = (
"{task}\u2026 duration: {duration}".format(
task=escape(progress.name_during),
duration=timestamp2timedelta(progress.elapsed_seconds())))
else:
text = (
"{task}\u2026 {progress:.0f}%, ETA: {eta}".format(
task=escape(progress.name_during),
progress=100. * progress.progress,
eta=timestamp2timedelta(progress.eta())))
return text
def _get_fill_style(self, progress):
if progress.max_steps is None:
style = self._get_unknown_steps_fill_style(progress)
else:
style = self._get_known_steps_fill_style(progress)
if progress.finished:
style['animation'] = 'none'
style['backgroundImage'] = 'none'
return style
def _get_known_steps_fill_style(self, progress):
return {
'width': '{:.0f}%'.format(100. * progress.progress),
'animation': 'none',
'backgroundColor': '#bdd2e6',
'backgroundImage': 'none',
'transition':
'width 0.1s linear' if progress.progress > 0. else 'none',
}
def _get_unknown_steps_fill_style(self, progress):
return {
'width': '100%',
'animation': 'pb-fill-anim 2s linear infinite',
'backgroundColor': '#bdd2e6',
'backgroundSize': '100px 100%',
'backgroundImage': (
'repeating-linear-gradient('
'90deg, #bdd2e6, #edf2f8 40%, #bdd2e6 80%, #bdd2e6)'),
}
class HtmlProgressBar(ProgressBar):
"""A progress bar using a HTML representation.
This HTML representation can be used in Jupyter notebook environments
and is provided by the *_repr_html_* method that will be automatically
used by IPython interpreters.
If the kernel frontend does not support HTML (e.g., in Jupyter qtconsole),
    a warning message will be shown as the plain-text representation instead.
"""
def __init__(self):
super(HtmlProgressBar, self).__init__()
self._uuid = uuid.uuid4()
self._handle = None
def update(self, progress):
if self._handle is None:
display(self._HtmlBase(self._uuid))
self._handle = display(self._js_update(progress), display_id=True)
else:
self._handle.update(self._js_update(progress))
class _HtmlBase(object):
def __init__(self, uuid):
self.uuid = uuid
def __repr__(self):
return (
"HtmlProgressBar cannot be displayed. Please use the "
"TerminalProgressBar. It can be enabled with "
"`nengo.rc.set('progress', 'progress_bar', "
"'nengo.utils.progress.TerminalProgressBar')`.")
def _repr_html_(self):
return '''
<script>
if (Jupyter.version.split(".")[0] < 5) {{
var pb = document.getElementById("{uuid}");
var text = document.createTextNode(
"HMTL progress bar requires Jupyter Notebook >= " +
"5.0 or Jupyter Lab. Alternatively, you can use " +
"TerminalProgressBar().");
pb.parentNode.insertBefore(text, pb);
}}
</script>
<div id="{uuid}" style="
width: 100%;
border: 1px solid #cfcfcf;
border-radius: 4px;
text-align: center;
position: relative;">
<div class="pb-text" style="
position: absolute;
width: 100%;">
0%
</div>
<div class="pb-fill" style="
background-color: #bdd2e6;
width: 0%;">
<style type="text/css" scoped="scoped">
@keyframes pb-fill-anim {{
0% {{ background-position: 0 0; }}
100% {{ background-position: 100px 0; }}
}}
</style>
</div>
</div>'''.format(uuid=self.uuid)
def _js_update(self, progress):
if progress is None:
text = ''
elif progress.finished:
text = "{} finished in {}.".format(
escape(progress.name_after),
timestamp2timedelta(progress.elapsed_seconds()))
elif progress.max_steps is None:
text = (
"{task}… duration: {duration}".format(
task=escape(progress.name_during),
duration=timestamp2timedelta(progress.elapsed_seconds())))
else:
text = (
"{task}… {progress:.0f}%, ETA: {eta}".format(
task=escape(progress.name_during),
progress=100. * progress.progress,
eta=timestamp2timedelta(progress.eta())))
if progress.max_steps is None:
update = self._update_unknown_steps(progress)
else:
update = self._update_known_steps(progress)
if progress.finished:
finish = '''
fill.style.animation = 'none';
fill.style.backgroundImage = 'none';
'''
else:
finish = ''
return Javascript('''
(function () {{
var root = document.getElementById('{uuid}');
var text = root.getElementsByClassName('pb-text')[0];
var fill = root.getElementsByClassName('pb-fill')[0];
text.innerHTML = '{text}';
{update}
{finish}
}})();
'''.format(uuid=self._uuid, text=text, update=update, finish=finish))
def _update_known_steps(self, progress):
return '''
if ({progress} > 0.) {{
fill.style.transition = 'width 0.1s linear';
}} else {{
fill.style.transition = 'none';
}}
fill.style.width = '{progress}%';
fill.style.animation = 'none';
fill.style.backgroundImage = 'none'
'''.format(progress=100. * progress.progress)
def _update_unknown_steps(self, progress):
return '''
fill.style.width = '100%';
fill.style.animation = 'pb-fill-anim 2s linear infinite';
fill.style.backgroundSize = '100px 100%';
fill.style.backgroundImage = 'repeating-linear-gradient(' +
'90deg, #bdd2e6, #edf2f8 40%, #bdd2e6 80%, #bdd2e6)';
'''
class VdomOrHtmlProgressBar(ProgressBar):
"""Progress bar using the VDOM or HTML progress bar.
This progress bar will transmit both representations as part of a MIME
bundle and it is up to the Jupyter client to pick the preferred version.
    Usually this will be the VDOM if supported, and the HTML version where VDOM
is not supported.
"""
def __init__(self):
super(VdomOrHtmlProgressBar, self).__init__()
self._handle = None
self._vdom = VdomProgressBar()
self._html = HtmlProgressBar()
def update(self, progress):
self._vdom.progress = progress
if self._handle is None:
display(self._get_initial_bundle(progress), raw=True)
self._handle = display(
self._get_update_bundle(progress), raw=True, display_id=True)
self._handle.update(self._get_update_bundle(progress), raw=True)
def _get_initial_bundle(self, progress):
return {
'application/vdom.v1+json': {
'tagName': 'div', 'attributes': {}
},
'text/html': self._html._HtmlBase(self._html._uuid)._repr_html_(),
'text/plain': repr(self._html._HtmlBase(self._html._uuid)),
}
def _get_update_bundle(self, progress):
bundle = self._vdom._repr_mimebundle_([], [])
bundle['application/javascript'] = self._html._js_update(
progress)._repr_javascript_()
return bundle
class IPython5ProgressBar(ProgressBar):
"""ProgressBar for IPython>=5 environments.
    Provides a VDOM/HTML representation, except in a pure terminal IPython
    (i.e. not an IPython kernel that was connected to via ZMQ), where an
    ASCII progress bar will be used.
Note that some Jupyter environments (like qtconsole) will try to use the
VDOM/HTML version, but do not support HTML and will show a warning instead
of an actual progress bar.
"""
def __init__(self):
super(IPython5ProgressBar, self).__init__()
class Displayable(object):
def __init__(self):
self.display_requested = False
def _ipython_display_(self):
self.display_requested = True
d = Displayable()
display(d, exclude=['text/plain'])
if d.display_requested:
self._progress_bar = VdomOrHtmlProgressBar()
else:
self._progress_bar = TerminalProgressBar()
def update(self, progress):
self._progress_bar.update(progress)
class WriteProgressToFile(ProgressBar):
"""Writes progress to a file.
This is useful for remotely and intermittently monitoring progress.
Note that this file will be overwritten on each update of the progress!
Parameters
----------
filename : str
Path to the file to write the progress to.
"""
def __init__(self, filename):
self.filename = filename
super(WriteProgressToFile, self).__init__()
def update(self, progress):
if progress.finished:
text = "{} finished in {}.".format(
                progress.name_after,
timestamp2timedelta(progress.elapsed_seconds()))
else:
text = "{progress:.0f}%, ETA: {eta}".format(
progress=100 * progress.progress,
eta=timestamp2timedelta(progress.eta()))
with open(self.filename, 'w') as f:
f.write(text + os.linesep)
class AutoProgressBar(ProgressBar):
"""Suppresses the progress bar unless the ETA exceeds a threshold.
Parameters
----------
delegate : ProgressBar
The actual progress bar to display, if ETA is high enough.
min_eta : float, optional
The minimum ETA threshold for displaying the progress bar.
"""
def __init__(self, delegate, min_eta=1.):
self.delegate = delegate
super(AutoProgressBar, self).__init__()
self.min_eta = min_eta
self._visible = False
def update(self, progress):
min_delay = progress.time_start + 0.1
long_eta = (progress.elapsed_seconds() + progress.eta() > self.min_eta
and min_delay < time.time())
if self._visible:
self.delegate.update(progress)
elif long_eta or progress.finished:
self._visible = True
self.delegate.update(progress)
def close(self):
self.delegate.close()
class ProgressTracker(object):
"""Tracks the progress of some process with a progress bar.
Parameters
----------
progress_bar : ProgressBar or bool or None
The progress bar to display the progress (or True to use the default
progress bar, False/None to disable progress bar).
    total_progress : Progress
        The Progress object tracking the overall process.
update_interval : float, optional
Time to wait (in seconds) between updates to progress bar display.
"""
def __init__(self, progress_bar, total_progress, update_interval=0.1):
self.progress_bar = to_progressbar(progress_bar)
self.total_progress = total_progress
self.update_interval = update_interval
self.update_thread = threading.Thread(target=self.update_loop)
self.update_thread.daemon = True
self._closing = False
self.sub_progress = None
def next_stage(self, name_during='', name_after=None, max_steps=None):
"""Begin tracking progress of a new stage.
Parameters
----------
max_steps : int, optional
The total number of calculation steps of the process.
name_during : str, optional
Short description of the task to be used while it is running.
name_after : str, optional
Short description of the task to be used after it has
finished. Defaults to *name_during*.
"""
if self.sub_progress is not None:
self.total_progress.step()
self.sub_progress = Progress(name_during, name_after, max_steps)
return self.sub_progress
def __enter__(self):
self._closing = False
self.total_progress.__enter__()
if not isinstance(self.progress_bar, NoProgressBar):
self.update_thread.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._closing = True
self.total_progress.__exit__(exc_type, exc_value, traceback)
if not isinstance(self.progress_bar, NoProgressBar):
self.update_thread.join()
self.progress_bar.update(self.total_progress)
self.progress_bar.close()
def update_loop(self):
"""Update the progress bar display (will run in a separate thread)."""
while not self._closing:
if (self.sub_progress is not None
and not self.sub_progress.finished):
self.progress_bar.update(self.sub_progress)
else:
self.progress_bar.update(self.total_progress)
time.sleep(self.update_interval)
def get_default_progressbar():
"""The default progress bar to use depending on the execution environment.
Returns
-------
``ProgressBar``
"""
try:
pbar = rc.getboolean('progress', 'progress_bar')
if pbar:
pbar = 'auto'
else:
pbar = 'none'
except ValueError:
pbar = rc.get('progress', 'progress_bar')
if pbar.lower() == 'auto':
if get_ipython() is not None and check_ipy_version((5, 0)):
return AutoProgressBar(IPython5ProgressBar())
else:
return AutoProgressBar(TerminalProgressBar())
if pbar.lower() == 'none':
return NoProgressBar()
try:
return _load_class(pbar)()
except Exception as e:
warnings.warn(str(e))
return NoProgressBar()
def to_progressbar(progress_bar):
"""Converts to a ``ProgressBar`` instance.
Parameters
----------
progress_bar : None, bool, or ProgressBar
Object to be converted to a ``ProgressBar``.
Returns
-------
ProgressBar
Return ``progress_bar`` if it is already a progress bar, the default
progress bar if ``progress_bar`` is ``True``, and ``NoProgressBar`` if
it is ``None`` or ``False``.
"""
if progress_bar is False or progress_bar is None:
progress_bar = NoProgressBar()
if progress_bar is True:
progress_bar = get_default_progressbar()
return progress_bar
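# --- Minimal usage sketch (illustrative only; the module is normally driven by
# --- Nengo's builder/simulator rather than run directly, and the step counts
# --- below are arbitrary):
# tracker = ProgressTracker(TerminalProgressBar(),
#                           Progress("Overall", max_steps=2))
# with tracker:
#     with tracker.next_stage("Stage 1", max_steps=100) as stage:
#         for _ in range(100):
#             stage.step()
#     with tracker.next_stage("Stage 2", max_steps=50) as stage:
#         for _ in range(50):
#             stage.step()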
|
API.py
|
import json
import os
import threading
from datetime import date, datetime
import wx
from yandex_music.client import Client
from yandex_music.exceptions import Captcha
import events.events as events
from configs.configs import Configs
class YandexAPI(object):
def __init__(self):
self.conf = Configs()
self.client = self.login()
self.win = None
self.list_type = None
self.playlists_list = None
self.updating_thread = None
if 'RESOURCEPATH' in os.environ:
self.cache = '{}/cache'.format(os.environ['RESOURCEPATH'])
else:
self.dirName = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
self.cache = os.path.join(self.dirName, 'cache')
pass
def __new__(cls, *args, **kw):
if not hasattr(cls, '_instance'):
orig = super(YandexAPI, cls)
cls._instance = orig.__new__(cls)
return cls._instance
def login(self, login=None, password=None):
if self.conf.get_attr("token") is not False:
client = Client().from_token(self.conf.get_attr("token"))
elif login is not None and password is not None:
client = captcha_key = captcha_image_url = captcha_answer = None
while not client:
try:
client = Client.from_credentials(login, password, captcha_answer, captcha_key)
except Captcha as e:
if e.captcha.x_captcha_url:
captcha_image_url = e.captcha.x_captcha_url
captcha_key = e.captcha.x_captcha_key
else:
                        print('You submitted an answer without looking at the image..')
                    captcha_answer = input(f'{captcha_image_url}\nEnter the code from the image: ')
token = client.token
self.conf.set_attr("token", token)
return client
else:
client = Client()
self.client = client
return client
def is_logged_in(self):
if self.client.me.account.display_name is None:
return False
else:
return True
def logout(self):
self.conf.remove_attr("token")
self.client = Client()
pass
def get_display_name(self):
return str(self.login().me.account.display_name)
def get_play_lists_list(self):
entities = self.client.landing(blocks="personalplaylists").blocks[0].entities
lists = []
for playlist in entities:
lists.append({
"name": playlist.data.data.title,
"type": playlist.data.type
})
self.playlists_list = lists
return lists
def preparation(self, list_type, win):
self.updating_thread = threading.Thread(target=self.update)
self.list_type = list_type
self.win = win
index = {
"date": date.today().__str__(),
"last_track_num": 1,
"tracks": []
}
if not os.path.exists('{}/{}/'.format(self.cache, list_type)):
            os.mkdir('{}/{}'.format(self.cache, list_type))
if not os.path.exists('{}/{}/index.json'.format(self.cache, list_type)):
with open('{}/{}/index.json'.format(self.cache, list_type), 'w+') as file:
json.dump(index, file, indent=4)
self.updating_thread.start()
else:
if self.is_need_update():
with open('{}/{}/index.json'.format(self.cache, list_type), 'w+') as file:
json.dump(index, file, indent=4)
self.updating_thread.start()
else:
wx.PostEvent(self.win, events.FirstTrackAppear(playlist_type=list_type))
playlist_title = ""
for playlist in self.playlists_list:
if playlist['type'] == list_type:
playlist_title = playlist['name']
wx.PostEvent(self.win, events.PlaylistReady(playlist_name=playlist_title, playlist_type=list_type))
return True
def is_need_update(self):
list_type = self.list_type
with open('{}/{}/index.json'.format(self.cache, list_type), 'r') as file:
index_date = datetime.strptime(json.load(file)['date'], '%Y-%m-%d').date()
if index_date == date.today():
return False
else:
return True
def update(self):
print("Starting update")
list_type = self.list_type
blocks = self.client.landing(blocks="personalplaylists").blocks[0].entities
playlist = ""
print("processing blocks")
for block in blocks:
if block.data.type == list_type:
playlist = block.data.data
tracks = self.client.users_playlists(playlist.kind, playlist.owner.uid).tracks
index_file = json.load(open('{}/{}/index.json'.format(self.cache, list_type), 'r'))
index = 1
print("processing tracks")
for track in tracks:
if index == 2:
wx.PostEvent(self.win, events.FirstTrackAppear(playlist_name=playlist.title, playlist_type=list_type))
full_track_info = track.track
index_file['tracks'].append({
"id": full_track_info.id,
"title": full_track_info.title,
"artist": full_track_info.artists[0]['name'],
"duration": full_track_info.duration_ms,
"num": index
})
with open('{}/{}/index.json'.format(self.cache, list_type), 'w+') as file:
json.dump(index_file, file)
track.track.download_cover('{}/{}/{}.png'.format(self.cache, list_type, index))
track.track.download('{}/{}/{}.mp3'.format(self.cache, list_type, index), codec="mp3", bitrate_in_kbps=320)
if index == 3:
break
index = index + 1
print("finishing updating")
wx.PostEvent(self.win, events.PlaylistReady(playlist_name=playlist.title, playlist_type=list_type))
return True
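# --- Illustrative usage sketch (credentials are placeholders; in the real
# --- application this class is driven from the wx UI):
# api = YandexAPI()
# api.login(login='user@example.com', password='secret')  # or reuse a stored token
# print(api.get_display_name())
# print(api.get_play_lists_list())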
|
WebcamCapture.py
|
# import the necessary packages
from threading import Thread
import cv2
class WebcamVideoStream:
def __init__(self, cap):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cap
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, daemon=True).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
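# A small self-test (assumes a webcam is available at index 0; press 'q' to quit).
# This block is illustrative and was not part of the original module.
if __name__ == "__main__":
    cap = cv2.VideoCapture(0)
    stream = WebcamVideoStream(cap).start()
    try:
        while True:
            frame = stream.read()
            if frame is None:
                continue
            cv2.imshow("WebcamVideoStream", frame)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
    finally:
        stream.stop()
        cap.release()
        cv2.destroyAllWindows()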
|
weather_station.py
|
import threading
import time
from http.server import BaseHTTPRequestHandler, HTTPServer
import opcua
import logging
import open_weather
# todo: make server implementation of method work
# @uamethod
# def update_predictions_uamethod():
# """
# forces update of predictions in weather station
# """
# logging.log(logging.INFO, "updating predictions")
# weather_station.update_prediction()
class WeatherStation:
"""
Highest level representation of weather station.
Contains opc-ua server, weather api client and necessary methods.
"""
def __init__(self, opcua_url, server_name, api_key, place, frequency_of_fetching=30, address='localhost', port=80):
"""
:param opcua_url: url of the server endpoint to expose
:type opcua_url: string
:param server_name: name of the server
:type server_name: string
:param api_key: api key of the open_weather API,
needs to be obtained via registration of the user on open weather maps platform
:type api_key: string
        :param place: location in the format "city,country code", e.g. "London,gb"
:type place: string
:param frequency_of_fetching: frequency of refreshing of weather measurements in seconds
:type frequency_of_fetching: int
"""
self.weather_fetcher = open_weather.Client(api_key, place)
self.address = address
self.port = port
self.server = opcua.Server()
self.server.set_endpoint(opcua_url)
self.server.set_server_name(server_name)
idx = self.server.register_namespace("namespace")
days, time_of_last_fetching = self.fetch_prediction()
time_node = self.server.nodes.objects.add_object(idx, "time")
self.time_holder = time_node.add_variable(idx, "last_fetching", time_of_last_fetching)
# todo: make server implementation of method work
# time_node.add_method(idx, "update_predictions_uamethod", update_predictions_uamethod)
self.object_variables = {}
for d in days.keys():
idx = idx + 1
obj = self.server.nodes.objects.add_object(idx, d)
self.object_variables[d] = {}
for k in days[d].keys():
variable = obj.add_variable(idx, k, days[d][k])
self.object_variables[d][k] = variable
self.frequency_of_fetching = frequency_of_fetching
        # todo: workaround for the OPC UA method that does not work yet (see above)
update_predictions = self.update_prediction
class HTTPRequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
update_predictions()
self.wfile.write(b'Success!!!')
self.handler = HTTPRequestHandler
def fetch_prediction(self):
"""
fetches new predictions
:return: predictions {"day":prediction_values}
:rtype: {string:{string:int}}
possible prediction params and value types
"temperature": int [C°],
"humidity": int [%],
"pressure": int [hPA],
"wind_speed": int [m/s],
"wind_direction": int [°],
"clouds": int [%],
"conditions": string [status:string],
"time": int [int - UNIX TIME],
"""
logging.log(logging.INFO, "fetching data from https://openweathermap.org/api")
today, two_days_prediction, time_of_prediction = self.weather_fetcher.fetch_weather(2)
return {"today": today, "tomorrow": two_days_prediction[0],
"day_after": two_days_prediction[1]}, time_of_prediction
def update_prediction(self):
"""
fetches new predictions and updates opc-ua nodes with them
:return:
:rtype:
"""
logging.log(logging.INFO, "updating predictions")
prediction, time_of_prediction = self.fetch_prediction()
self.time_holder.set_value(time_of_prediction)
for day in self.object_variables.keys():
variables = self.object_variables[day]
for v in variables.keys():
variables[v].set_value(prediction[day][v])
def start_opcua_server(self):
"""
        starts the server and the periodic update loop
        if an update fails, the previous values are kept and the next
        attempt happens after the configured delay
"""
logging.log(logging.INFO, "Weather station has started")
self.server.start()
        while True:
            try:
                self.update_prediction()
            except Exception as e:
                # keep serving the previous values if an update fails
                logging.log(logging.ERROR, "failed to update predictions: {}".format(e))
            finally:
                time.sleep(self.frequency_of_fetching)
def start_web_server(self):
httpd = HTTPServer((self.address, self.port), self.handler)
print(httpd.serve_forever())
def start(self):
"""
Starts two threads
Necessary to expose rest api endpoint
"""
t1 = threading.Thread(target=self.start_opcua_server)
t2 = threading.Thread(target=self.start_web_server)
t2.start()
t1.start()
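# --- Illustrative usage (endpoint, API key and place are placeholders; the
# --- open_weather.Client wrapper is project-specific):
# station = WeatherStation(
#     opcua_url="opc.tcp://0.0.0.0:4840/weather/",
#     server_name="weather-station",
#     api_key="<openweathermap-api-key>",
#     place="London,gb",
#     frequency_of_fetching=60,
# )
# station.start()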
|
mcspm.py
|
# -*- coding:utf-8 -*-
from threading import Thread
import multiprocessing
import time
import sys
from core.mcservercore import MinecraftServer
from function.listener import EventListener
from function import console
class MinecraftServerProcessManager(multiprocessing.Process):
"""docstring for MinecraftServerProcessManager"""
def __init__(self):
multiprocessing.Process.__init__(self)
self.command_queue_center = {}
self.server_procss_center = {}
self.console_queue_mask = multiprocessing.Queue()
self.console_listener = None
self.event_listener = EventListener()
self.event_listener.on_listener('close',self._event_del_server)
def create_minecraft_server(self,servername,cwd,run_command):
        ''' Create a Minecraft server '''
self.event_listener.fire_event('open',servername,False)
mc_server = MinecraftServer(servername,cwd,run_command)
command_queue = mc_server.command_queue()
if servername in self.command_queue_center.keys():
return False
self.command_queue_center[servername] = command_queue
self.server_procss_center[servername] = mc_server
mc_server.set_cosole_queue(self.console_queue_mask)
mc_server.start()
def send_command(self,servername,command):
        ''' Send a command to the specified server '''
if servername in self.command_queue_center.keys():
command_queue = self.command_queue_center[servername]
command_queue.put({
'command' : 'exec',
'msg' : command
})
return True
return False
def close_minecraft_server(self,servername,close_command = 'exit'):
        ''' Shut down a given Minecraft server '''
if servername in self.command_queue_center.keys():
            # tell the Java child process started by this Python program to exit
self.send_command(servername,close_command)
            # tell the command-reading thread to exit
self.command_queue_center[servername].put({
'command' : 'exit'
});
            # exit event
#self.server_procss_center[servername].stop_process()
return True
return False
def _console_thread_loop(self):
        ''' This thread handles the console output queue of the management center '''
while True:
console_data = self.console_queue_mask.get();
command_msg = console_data['command']
            # console output signal from a server
if command_msg == 'console':
self.event_listener.fire_event('console',[console_data['servername'],console_data['msg']])
            # signal sent when this server closes
if command_msg == 'close':
self.event_listener.fire_event('close',console_data['servername'],False)
def run(self):
        console.log(0, 'Loading MinecraftServerProcessManager module')
self._console_thread_loop = Thread(target=self._console_thread_loop)
self._console_thread_loop.start()
self._console_thread_loop.join()
def get_event_listener(self):
return self.event_listener;
def _event_del_server(self,servername):
        ''' Change the server's online status '''
try:
del self.server_procss_center[servername]
del self.command_queue_center[servername]
except KeyError:
pass
def get_online_server_info(self):
        ''' Get information about all online servers '''
return_dict = {}
for servername,mcserver in self.server_procss_center.items():
return_dict[servername] = mcserver.get_info()
return return_dict
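# --- Illustrative usage sketch (server name, working directory and launch
# --- command are placeholders; MinecraftServer comes from core.mcservercore):
# manager = MinecraftServerProcessManager()
# manager.start()
# manager.create_minecraft_server('survival', '/opt/minecraft/survival',
#                                 'java -Xmx2G -jar server.jar nogui')
# manager.send_command('survival', 'say hello from mcspm')
# manager.close_minecraft_server('survival', close_command='stop')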
|
word2vec_optimized.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec unbatched skip-gram model.
Trains the model described in:
(Mikolov, et al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does true SGD (i.e. no minibatching). To do this efficiently, custom
ops are used to sequentially process data within a 'batch'.
The key ops used are:
* skipgram custom op that does input processing.
* neg_train custom op that efficiently calculates and applies the gradient using
true SGD.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
word2vec = tf.load_op_library(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'word2vec_ops.so'))
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model.")
flags.DEFINE_string(
"train_data", None,
"Training data. E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
"eval_data", None, "Analogy questions. "
"See README.md for how to get 'questions-words.txt'.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 15,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.025, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 25,
"Negative samples per training example.")
flags.DEFINE_integer("batch_size", 500,
"Numbers of training examples each step processes "
"(no minibatching).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the target word.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_boolean(
"interactive", False,
"If true, enters an IPython interactive session to play with the trained "
"model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
"model.nearby([b'proton', b'elephant', b'maxwell'])")
FLAGS = flags.FLAGS
class Options(object):
"""Options used by our word2vec model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.emb_dim = FLAGS.embedding_size
# Training options.
# The training text file.
self.train_data = FLAGS.train_data
# Number of negative samples per example.
self.num_samples = FLAGS.num_neg_samples
# The initial learning rate.
self.learning_rate = FLAGS.learning_rate
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = FLAGS.epochs_to_train
# Concurrent training steps.
self.concurrent_steps = FLAGS.concurrent_steps
# Number of examples for one training step.
self.batch_size = FLAGS.batch_size
# The number of words to predict to the left and right of the target word.
self.window_size = FLAGS.window_size
# The minimum number of word occurrences for it to be included in the
# vocabulary.
self.min_count = FLAGS.min_count
# Subsampling threshold for word occurrence.
self.subsample = FLAGS.subsample
# Where to write out summaries.
self.save_path = FLAGS.save_path
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
# Eval options.
# The text file for eval.
self.eval_data = FLAGS.eval_data
class Word2Vec(object):
"""Word2Vec model (Skipgram)."""
def __init__(self, options, session):
self._options = options
self._session = session
self._word2id = {}
self._id2word = []
self.build_graph()
self.build_eval_graph()
self.save_vocab()
def read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ")
ids = [self._word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print("Eval analogy file: ", self._options.eval_data)
print("Questions: ", len(questions))
print("Skipped: ", questions_skipped)
self._analogy_questions = np.array(questions, dtype=np.int32)
def build_graph(self):
"""Build the model graph."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, current_epoch, total_words_processed,
examples, labels) = word2vec.skipgram_word2vec(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
# Declare all variables we need.
# Input words embedding: [vocab_size, emb_dim]
w_in = tf.Variable(
tf.random_uniform(
[opts.vocab_size,
opts.emb_dim], -0.5 / opts.emb_dim, 0.5 / opts.emb_dim),
name="w_in")
# Global step: scalar, i.e., shape [].
w_out = tf.Variable(tf.zeros([opts.vocab_size, opts.emb_dim]), name="w_out")
# Global step: []
global_step = tf.Variable(0, name="global_step")
# Linear learning rate decay.
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001,
1.0 - tf.cast(total_words_processed, tf.float32) / words_to_train)
# Training nodes.
inc = global_step.assign_add(1)
with tf.control_dependencies([inc]):
train = word2vec.neg_train_word2vec(w_in,
w_out,
examples,
labels,
lr,
vocab_count=opts.vocab_counts.tolist(),
num_negative_samples=opts.num_samples)
self._w_in = w_in
self._examples = examples
self._labels = labels
self._lr = lr
self._train = train
self.global_step = global_step
self._epoch = current_epoch
self._words = total_words_processed
def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
for i in xrange(opts.vocab_size):
vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode("utf-8")
f.write("%s %d\n" % (vocab_word,
opts.vocab_counts[i]))
def build_eval_graph(self):
"""Build the evaluation graph."""
# Eval graph
opts = self._options
# Each analogy task is to predict the 4th word (d) given three
# words: a, b, c. E.g., a=italy, b=rome, c=france, we should
# predict d=paris.
# The eval feeds three vectors of word ids for a, b, c, each of
# which is of size N, where N is the number of analogies we want to
# evaluate in one batch.
analogy_a = tf.placeholder(dtype=tf.int32) # [N]
analogy_b = tf.placeholder(dtype=tf.int32) # [N]
analogy_c = tf.placeholder(dtype=tf.int32) # [N]
# Normalized word embeddings of shape [vocab_size, emb_dim].
nemb = tf.nn.l2_normalize(self._w_in, 1)
# Each row of a_emb, b_emb, c_emb is a word's embedding vector.
# They all have the shape [N, emb_dim]
a_emb = tf.gather(nemb, analogy_a) # a's embs
b_emb = tf.gather(nemb, analogy_b) # b's embs
c_emb = tf.gather(nemb, analogy_c) # c's embs
# We expect that d's embedding vectors on the unit hyper-sphere is
# near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
target = c_emb + (b_emb - a_emb)
# Compute cosine distance between each pair of target and vocab.
# dist has shape [N, vocab_size].
dist = tf.matmul(target, nemb, transpose_b=True)
# For each question (row in dist), find the top 4 words.
_, pred_idx = tf.nn.top_k(dist, 4)
# Nodes for computing neighbors for a given word according to
# their cosine distance.
nearby_word = tf.placeholder(dtype=tf.int32) # word id
nearby_emb = tf.gather(nemb, nearby_word)
nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
min(1000, opts.vocab_size))
# Nodes in the construct graph which are used by training and
# evaluation to run/feed/fetch.
self._analogy_a = analogy_a
self._analogy_b = analogy_b
self._analogy_c = analogy_c
self._analogy_pred_idx = pred_idx
self._nearby_word = nearby_word
self._nearby_val = nearby_val
self._nearby_idx = nearby_idx
# Properly initialize all variables.
tf.global_variables_initializer().run()
self.saver = tf.train.Saver()
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
_, epoch = self._session.run([self._train, self._epoch])
if epoch != initial_epoch:
break
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
workers = []
for _ in xrange(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time = initial_words, time.time()
while True:
time.sleep(5) # Reports our progress once a while.
(epoch, step, words, lr) = self._session.run(
[self._epoch, self.global_step, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f words/sec = %8.0f\r" % (epoch, step,
lr, rate),
end="")
sys.stdout.flush()
if epoch != initial_epoch:
break
for t in workers:
t.join()
def _predict(self, analogy):
"""Predict the top 4 answers for analogy questions."""
idx, = self._session.run([self._analogy_pred_idx], {
self._analogy_a: analogy[:, 0],
self._analogy_b: analogy[:, 1],
self._analogy_c: analogy[:, 2]
})
return idx
def eval(self):
"""Evaluate analogy questions and reports accuracy."""
# How many questions we get right at precision@1.
correct = 0
try:
total = self._analogy_questions.shape[0]
except AttributeError as e:
raise AttributeError("Need to read analogy questions.")
start = 0
while start < total:
limit = start + 2500
sub = self._analogy_questions[start:limit, :]
idx = self._predict(sub)
start = limit
for question in xrange(sub.shape[0]):
for j in xrange(4):
if idx[question, j] == sub[question, 3]:
# Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
correct += 1
break
elif idx[question, j] in sub[question, :3]:
# We need to skip words already in the question.
continue
else:
# The correct label is not the precision@1
break
print()
print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
correct * 100.0 / total))
def analogy(self, w0, w1, w2):
"""Predict word w3 as in w0:w1 vs w2:w3."""
wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
idx = self._predict(wid)
for c in [self._id2word[i] for i in idx[0, :]]:
if c not in [w0, w1, w2]:
print(c)
break
print("unknown")
def nearby(self, words, num=20):
"""Prints out nearby words given a list of words."""
ids = np.array([self._word2id.get(x, 0) for x in words])
vals, idx = self._session.run(
[self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
for i in xrange(len(words)):
print("\n%s\n=====================================" % (words[i]))
for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
"""Train a word2vec model."""
if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
print("--train_data --eval_data and --save_path must be specified.")
sys.exit(1)
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
model = Word2Vec(opts, session)
model.read_analogies() # Read analogy questions
for _ in xrange(opts.epochs_to_train):
model.train() # Process one epoch
model.eval() # Eval analogies.
# Perform a final save.
model.saver.save(session, os.path.join(opts.save_path, "model.ckpt"),
global_step=model.global_step)
if FLAGS.interactive:
# E.g.,
# [0]: model.analogy(b'france', b'paris', b'russia')
# [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
_start_shell(locals())
if __name__ == "__main__":
tf.app.run()
# Skip-Gram
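# Example invocation (file paths are placeholders; the compiled word2vec_ops.so
# must sit next to this script):
#   python word2vec_optimized.py \
#     --train_data=text8 \
#     --eval_data=questions-words.txt \
#     --save_path=/tmp/word2vec_optimized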
|
monitored_session_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for monitored_session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
import glob
import os
import threading
import time
import tensorflow as tf
from tensorflow.contrib import testing
from tensorflow.python.training import monitored_session
class ScaffoldTest(tf.test.TestCase):
"""Scaffold tests."""
def test_nothing_created_before_finalize(self):
with tf.Graph().as_default():
scaffold = tf.train.Scaffold()
self.assertEqual(None, scaffold.init_op)
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertEqual(None, scaffold.ready_op)
self.assertEqual(None, scaffold.local_init_op)
self.assertEqual(None, scaffold.saver)
def test_defaults_empty_graph(self):
with tf.Graph().as_default():
scaffold = tf.train.Scaffold()
tf.Variable(1, name='my_var')
scaffold.finalize()
self.assertTrue(isinstance(scaffold.init_op, tf.Operation))
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertTrue(isinstance(scaffold.ready_op, tf.Tensor))
self.assertTrue(isinstance(scaffold.local_init_op, tf.Operation))
self.assertTrue(isinstance(scaffold.saver, tf.train.Saver))
with self.test_session() as sess:
self.assertTrue(b'my_var' in sess.run(scaffold.ready_op))
sess.run([scaffold.init_op, scaffold.local_init_op])
self.assertEquals(0, len(sess.run(scaffold.ready_op)))
def test_defaults_no_variables(self):
with tf.Graph().as_default():
scaffold = tf.train.Scaffold()
tf.constant(1, name='my_const')
scaffold.finalize()
self.assertTrue(isinstance(scaffold.init_op, tf.Operation))
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertTrue(isinstance(scaffold.ready_op, tf.Tensor))
self.assertTrue(isinstance(scaffold.local_init_op, tf.Operation))
self.assertTrue(isinstance(scaffold.saver, tf.train.Saver))
def test_caches_values(self):
with tf.Graph().as_default():
tf.Variable([1])
scaffold1 = tf.train.Scaffold()
scaffold1.finalize()
scaffold2 = tf.train.Scaffold()
scaffold2.finalize()
self.assertEqual(scaffold1.init_op, scaffold2.init_op)
self.assertEqual(scaffold1.ready_op, scaffold2.ready_op)
self.assertEqual(scaffold1.local_init_op, scaffold2.local_init_op)
self.assertEqual(scaffold1.saver, scaffold2.saver)
def test_raise_error_if_more_than_one_cached_item(self):
with tf.Graph().as_default():
tf.Variable([1])
tf.add_to_collection(tf.GraphKeys.SAVERS, tf.train.Saver())
tf.add_to_collection(tf.GraphKeys.SAVERS, tf.train.Saver())
with self.assertRaisesRegexp(RuntimeError, 'More than one item'):
tf.train.Scaffold().finalize()
def test_uses_passed_values(self):
with tf.Graph().as_default():
tf.Variable([1])
saver = tf.train.Saver()
scaffold = tf.train.Scaffold(
init_op=2,
init_feed_dict=3,
init_fn=lambda scaffold, sess: 4,
ready_op=5,
local_init_op=6,
saver=saver)
scaffold.finalize()
self.assertEqual(2, scaffold.init_op)
self.assertEqual(3, scaffold.init_feed_dict)
self.assertTrue(callable(scaffold.init_fn))
self.assertEqual(5, scaffold.ready_op)
self.assertEqual(6, scaffold.local_init_op)
self.assertEqual(saver, scaffold.saver)
def test_graph_is_finalized(self):
with tf.Graph().as_default():
tf.Variable([1])
tf.train.Scaffold().finalize()
with self.assertRaisesRegexp(RuntimeError,
'Graph is finalized and cannot be modified'):
tf.constant([0])
def _test_dir(temp_dir, test_name):
"""Create an empty dir to use for tests.
Args:
temp_dir: Tmp directory path.
test_name: Name of the test.
Returns:
Absolute path to the test directory.
"""
test_dir = os.path.join(temp_dir, test_name)
if os.path.isdir(test_dir):
for f in glob.glob('%s/*' % test_dir):
os.remove(f)
else:
os.makedirs(test_dir)
return test_dir
class MonitoredTrainingSessionTest(tf.test.TestCase):
"""Tests MonitoredTrainingSession."""
def test_saving_restoring_checkpoint(self):
logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
with tf.train.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# A restart will find the checkpoint and recover automatically.
with tf.train.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(2, session.run(gstep))
def test_summaries(self):
logdir = _test_dir(self.get_temp_dir(), 'test_summaries')
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
new_gstep = tf.assign_add(gstep, 1)
tf.summary.scalar('my_summary_tag', new_gstep * 2)
with tf.train.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
for _ in range(101): # 100 is default summary writing steps
session.run(new_gstep)
summaries = testing.latest_summaries(logdir)
tags = [s.summary.value[0].tag for s in summaries]
self.assertIn('my_summary_tag', tags)
self.assertIn('global_step/sec', tags)
class StopAtNSession(monitored_session._WrappedSession):
"""A wrapped session that stops at the N-th call to _check_stop."""
def __init__(self, sess, n):
super(StopAtNSession, self).__init__(sess)
self._count = n
def _check_stop(self):
if self._count == 0:
return True
self._count -= 1
return False
class WrappedSessionTest(tf.test.TestCase):
"""_WrappedSession tests."""
def test_properties(self):
with self.test_session() as sess:
tf.constant(0.0)
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertEquals(sess.graph, wrapped_sess.graph)
self.assertEquals(sess.sess_str, wrapped_sess.sess_str)
def test_should_stop_on_close(self):
with self.test_session() as sess:
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertFalse(wrapped_sess.should_stop())
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
def test_should_stop_uses_check_stop(self):
with self.test_session() as sess:
wrapped_sess = StopAtNSession(sess, 3)
self.assertFalse(wrapped_sess.should_stop())
self.assertFalse(wrapped_sess.should_stop())
self.assertFalse(wrapped_sess.should_stop())
self.assertTrue(wrapped_sess.should_stop())
def test_should_stop_delegates_to_wrapped_session(self):
with self.test_session() as sess:
wrapped_sess0 = StopAtNSession(sess, 4)
wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0)
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertTrue(wrapped_sess1.should_stop())
def test_close_twice(self):
with self.test_session() as sess:
wrapped_sess = monitored_session._WrappedSession(sess)
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
def test_run(self):
with self.test_session() as sess:
c = tf.constant(0)
v = tf.identity(c)
self.assertEqual(42, sess.run(v, feed_dict={c: 42}))
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51}))
def busy_wait_for_coord_stop(coord):
while not coord.should_stop():
time.sleep(0.001)
class CoordinatedSessionTest(tf.test.TestCase):
"""_CoordinatedSession tests."""
def test_properties(self):
with self.test_session() as sess:
tf.constant(0.0)
coord = tf.train.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertEquals(sess.graph, coord_sess.graph)
self.assertEquals(sess.sess_str, coord_sess.sess_str)
def test_run(self):
with self.test_session() as sess:
c = tf.constant(0)
v = tf.identity(c)
coord = tf.train.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))
def test_should_stop_on_close(self):
with self.test_session() as sess:
coord = tf.train.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
coord_sess.close()
self.assertTrue(coord_sess.should_stop())
def test_should_stop_on_coord_stop(self):
with self.test_session() as sess:
coord = tf.train.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
coord.request_stop()
self.assertTrue(coord_sess.should_stop())
def test_dont_request_stop_on_exception_in_main_thread(self):
with self.test_session() as sess:
c = tf.constant(0)
v = tf.identity(c)
coord = tf.train.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
self.assertEqual(0, coord_sess.run(c))
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
self.assertFalse(coord.should_stop())
self.assertFalse(coord_sess.should_stop())
def test_stop_threads_on_close_after_exception(self):
with self.test_session() as sess:
c = tf.constant(0)
v = tf.identity(c)
coord = tf.train.Coordinator()
threads = [threading.Thread(
target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)]
for t in threads:
coord.register_thread(t)
t.start()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(0, coord_sess.run(c))
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
for t in threads:
self.assertTrue(t.is_alive())
with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
coord_sess.close()
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
def test_stop_threads_on_close(self):
with self.test_session() as sess:
coord = tf.train.Coordinator()
threads = [threading.Thread(
target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)]
for t in threads:
coord.register_thread(t)
t.start()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
coord_sess.close()
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
class AbortAtNSession(object):
"""A mock sessionthat aborts at the N-th run call."""
def __init__(self, sess, n):
self._sess = sess
self._count = n
def close(self):
pass
def run(self, *args, **kwargs):
if self._count == 0:
raise tf.errors.AbortedError('Aborted at N', None, None)
self._count -= 1
return self._sess.run(*args, **kwargs)
class RecoverableSessionTest(tf.test.TestCase):
"""_RecoverableSession tests."""
class _SessionReturner(object):
def __init__(self, sess):
self._sess = sess
def create_session(self):
return self._sess
def test_properties(self):
with self.test_session() as sess:
tf.constant(0.0)
recoverable_sess = monitored_session._RecoverableSession(
self._SessionReturner(sess))
self.assertEquals(sess.graph, recoverable_sess.graph)
self.assertEquals(sess.sess_str, recoverable_sess.sess_str)
def test_run(self):
with self.test_session() as sess:
c = tf.constant(0)
v = tf.identity(c)
recoverable_sess = monitored_session._RecoverableSession(
self._SessionReturner(sess))
self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
def test_recovery(self):
with self.test_session() as sess:
class StackSessionCreator(object):
def __init__(self, sess):
self.sessions_to_use = [
AbortAtNSession(sess, x + 1) for x in range(3)
]
def create_session(self):
return self.sessions_to_use.pop(0)
c = tf.constant(0)
v = tf.identity(c)
session_creator = StackSessionCreator(sess)
# List of 3 sessions to use for recovery. The first one aborts
# after 1 run() call, the second after 2 run calls, the third
# after 3 run calls.
self.assertEqual(3, len(session_creator.sessions_to_use))
# Make the recoverable session use these 3 sessions in sequence by
# passing a factory that pops from the sessions_to_use list.
recoverable_sess = monitored_session._RecoverableSession(session_creator)
self.assertEqual(
2, len(session_creator.sessions_to_use)) # One session popped.
# Using first session.
self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
self.assertEqual(
2, len(session_creator.sessions_to_use)) # Still 2 sessions available
# This will fail and recover by picking up the second session.
self.assertEqual(42, recoverable_sess.run(v, feed_dict={c: 42}))
self.assertEqual(
1, len(session_creator.sessions_to_use)) # Still 1 session available
self.assertEqual(33, recoverable_sess.run(v, feed_dict={c: 33}))
self.assertEqual(
1, len(session_creator.sessions_to_use)) # Still 1 session available
# This will fail and recover by picking up the last session.
self.assertEqual(24, recoverable_sess.run(v, feed_dict={c: 24}))
self.assertEqual(
0, len(session_creator.sessions_to_use)) # All sessions used.
self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11}))
self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0}))
# This will fail and throw a real error as the pop() will fail.
with self.assertRaisesRegexp(IndexError, 'pop from empty list'):
recoverable_sess.run(v, feed_dict={c: -12})
class FakeSession(monitored_session._WrappedSession):
def __init__(self, sess):
monitored_session._WrappedSession.__init__(self, sess)
self.args_called = {}
def run(self, fetches, **kwargs):
self.args_called = dict(kwargs)
# Call run only with fetches since we directly pass other arguments.
return monitored_session._WrappedSession.run(self, fetches)
class FakeHook(tf.train.SessionRunHook):
def __init__(self):
self.should_stop = False
self.request = None
self.call_counter = Counter()
self.last_run_context = None
self.last_run_values = None
def before_run(self, run_context):
self.call_counter['before_run'] += 1
self.last_run_context = run_context
return self.request
def after_run(self, run_context, run_values):
self.call_counter['after_run'] += 1
self.last_run_values = run_values
if self.should_stop:
run_context.request_stop()
class HookedSessionTest(tf.test.TestCase):
def testRunPassesAllArguments(self):
with tf.Graph().as_default(), tf.Session() as sess:
mock_run = FakeSession(sess)
mon_sess = monitored_session._HookedSession(sess=mock_run, hooks=[])
a_tensor = tf.constant([0], name='a_tensor')
sess.run(tf.global_variables_initializer())
output = mon_sess.run(fetches=a_tensor,
feed_dict='a_feed',
options='an_option',
run_metadata='a_metadata')
self.assertEqual(output, [0])
self.assertEqual(mock_run.args_called, {
'feed_dict': 'a_feed',
'options': 'an_option',
'run_metadata': 'a_metadata'
})
def testCallsHooksBeginEnd(self):
with tf.Graph().as_default(), tf.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = tf.constant([0], name='a_tensor')
sess.run(tf.global_variables_initializer())
mon_sess.run(a_tensor)
for hook in [mock_hook, mock_hook2]:
self.assertEqual(
hook.last_run_values, tf.train.SessionRunValues(results=None))
self.assertEqual(hook.last_run_context.original_args,
tf.train.SessionRunArgs(a_tensor))
self.assertEqual(hook.last_run_context.session, sess)
self.assertEqual(hook.call_counter['before_run'], 1)
self.assertEqual(hook.call_counter['after_run'], 1)
def testShouldStop(self):
with tf.Graph().as_default(), tf.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
tf.constant([0], name='a_tensor')
sess.run(tf.global_variables_initializer())
mon_sess.run(fetches='a_tensor')
self.assertFalse(mon_sess.should_stop())
mock_hook.should_stop = True
mon_sess.run(fetches='a_tensor')
self.assertTrue(mon_sess.should_stop())
def testFetchesHookRequests(self):
with tf.Graph().as_default(), tf.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = tf.constant([0], name='a_tensor')
another_tensor = tf.constant([5], name='another_tensor')
third_tensor = tf.constant([10], name='third_tensor')
mock_hook.request = tf.train.SessionRunArgs([another_tensor])
mock_hook2.request = tf.train.SessionRunArgs([third_tensor])
sess.run(tf.global_variables_initializer())
output = mon_sess.run(fetches=a_tensor)
self.assertEqual(output, [0])
self.assertEqual(mock_hook.last_run_values.results, [5])
self.assertEqual(mock_hook2.last_run_values.results, [10])
def testOnlyHooksHaveFeeds(self):
with tf.Graph().as_default(), tf.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = tf.constant([0], name='a_tensor')
b_tensor = tf.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = tf.train.SessionRunArgs(
None, feed_dict={
a_tensor: [5]
})
mock_hook2.request = tf.train.SessionRunArgs(
None, feed_dict={
b_tensor: [10]
})
sess.run(tf.global_variables_initializer())
self.assertEqual(mon_sess.run(fetches=add_tensor), [15])
def testBothHooksAndUserHaveFeeds(self):
with tf.Graph().as_default(), tf.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = tf.constant([0], name='a_tensor')
b_tensor = tf.constant([0], name='b_tensor')
c_tensor = tf.constant([0], name='c_tensor')
add_tensor = a_tensor + b_tensor + c_tensor
mock_hook.request = tf.train.SessionRunArgs(
None, feed_dict={
a_tensor: [5]
})
mock_hook2.request = tf.train.SessionRunArgs(
None, feed_dict={
b_tensor: [10]
})
sess.run(tf.global_variables_initializer())
feed_dict = {c_tensor: [20]}
self.assertEqual(
mon_sess.run(fetches=add_tensor, feed_dict=feed_dict), [35])
# User feed_dict should not be changed
self.assertEqual(len(feed_dict), 1)
def testHooksFeedConflicts(self):
with tf.Graph().as_default(), tf.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = tf.constant([0], name='a_tensor')
b_tensor = tf.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = tf.train.SessionRunArgs(
None, feed_dict={
a_tensor: [5]
})
mock_hook2.request = tf.train.SessionRunArgs(
None, feed_dict={
a_tensor: [10]
})
sess.run(tf.global_variables_initializer())
with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
mon_sess.run(fetches=add_tensor)
def testHooksAndUserFeedConflicts(self):
with tf.Graph().as_default(), tf.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = tf.constant([0], name='a_tensor')
b_tensor = tf.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = tf.train.SessionRunArgs(
None, feed_dict={
a_tensor: [5]
})
mock_hook2.request = tf.train.SessionRunArgs(
None, feed_dict={
b_tensor: [10]
})
sess.run(tf.global_variables_initializer())
with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]})
class RaiseOnceAtCountN(tf.train.SessionRunHook):
"""Hook that raises an Exception at step N."""
def __init__(self, n, ex):
self.n = n
self.ex = ex
self.raised = False
def before_run(self, run_context):
# Raise the first time we reach step N.
self.n -= 1
if 0 == self.n and not self.raised:
self.raised = True
raise self.ex
return None
class MonitoredSessionTest(tf.test.TestCase):
"""MonitoredSession tests."""
def test_defaults(self):
with tf.Graph().as_default():
a_var = tf.Variable(0)
with tf.train.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
def test_last_step(self):
logdir = _test_dir(self.get_temp_dir(), 'test_last_step')
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
# Run till step 3 and save.
hooks = [tf.train.StopAtStepHook(last_step=3)]
scaffold = tf.train.Scaffold().finalize()
with tf.train.MonitoredSession(hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(3, session.run(do_step))
self.assertTrue(session.should_stop())
save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
# Run till step 5 and save.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = tf.train.ChiefSessionCreator(
tf.train.Scaffold(init_fn=load_ckpt))
hooks = [tf.train.StopAtStepHook(last_step=5)]
with tf.train.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(3, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(5, session.run(do_step))
self.assertTrue(session.should_stop())
def test_num_steps(self):
logdir = _test_dir(self.get_temp_dir(), 'test_num_steps')
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
# Do 3 steps and save.
hooks = [tf.train.StopAtStepHook(num_steps=3)]
scaffold = tf.train.Scaffold().finalize()
with tf.train.MonitoredSession(hooks=hooks) as session:
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
# Restore and do 4 steps.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = tf.train.ChiefSessionCreator(
scaffold=tf.train.Scaffold(init_fn=load_ckpt))
hooks = [tf.train.StopAtStepHook(num_steps=4)]
with tf.train.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
# This set of tests verifies the supervised session behavior when exceptions
# are raised next to the innermost session run() call.
def test_recovery(self):
logdir = _test_dir(self.get_temp_dir(), 'test_recovery')
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
scaffold = tf.train.Scaffold()
# Use a hook to save the model after every step. It also saves it at
# the end.
hooks = [tf.train.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)]
with tf.train.MonitoredSession(
session_creator=tf.train.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# A restart will find the checkpoint and recover automatically.
with tf.train.MonitoredSession(
session_creator=tf.train.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir)) as session:
self.assertEqual(2, session.run(gstep))
def test_retry_on_aborted_error(self):
# Tests that we silently retry on abort. Note that this does not test
# recovery as we do not use a CheckpointSaver in this test.
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, tf.errors.AbortedError(None, None, 'Abort'))
with tf.train.MonitoredSession(hooks=[hook]) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Here at step 3, the hook triggers and raises AbortedError. The
# MonitoredSession automatically retries and restarts from a freshly
# initialized session, so the step is back to 0 and running do_step
# moves it to 1.
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertTrue(hook.raised)
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
def test_recover_and_retry_on_aborted_error(self):
# Tests that we silently retry and recover on abort. This test uses
# a CheckpointSaver to have something to recover from.
logdir = _test_dir(self.get_temp_dir(),
'test_recover_and_retry_on_aborted_error')
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
scaffold = tf.train.Scaffold()
abort_hook = RaiseOnceAtCountN(
4, tf.errors.AbortedError(None, None, 'Abort'))
# Save after each step.
ckpt_hook = tf.train.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)
hooks = [abort_hook, ckpt_hook]
with tf.train.MonitoredSession(
session_creator=tf.train.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Here at step 3, the hook triggers and raises AbortedError. The
# MonitoredSession automatically restores and retries.
self.assertEqual(3, session.run(do_step))
self.assertTrue(abort_hook.raised)
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
def test_exit_cleanly_on_out_of_range_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, tf.errors.OutOfRangeError(None, None, 'EOI'))
session = tf.train.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises OutOfRange. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_exit_cleanly_on_stop_iteration_exception(self):
# Tests that we stop cleanly when StopIteration is raised.
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, StopIteration)
session = tf.train.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises StopIteration. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_regular_exception_pass_through_run(self):
# Tests that regular exceptions just pass through a "with
# MonitoredSession" block and set the session in stop mode.
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))
session = tf.train.MonitoredSession(hooks=[hook])
with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
with session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# This triggers the hook and raises the exception
session.run(do_step)
# We should not hit this
self.assertFalse(True)
self.assertTrue(hook.raised)
self.assertTrue(session.should_stop())
def test_regular_exception_reported_to_coord_pass_through_run(self):
# Tests that regular exceptions reported to the coordinator from a thread
# passes through a "run()" call within a "with MonitoredSession" block and
# set the session in stop mode.
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
session = tf.train.MonitoredSession()
run_performed_without_error = False
with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
# Call run() which should perform normally.
self.assertEqual(0, session.run(gstep))
run_performed_without_error = True
self.assertTrue(run_performed_without_error)
def test_regular_exception_reported_to_coord_pass_through_return(self):
# Tests that regular exceptions reported to the coordinator from a thread
# pass through returning from a "with MonitoredSession" block and
# set the session in stop mode.
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
session = tf.train.MonitoredSession()
with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
self.assertTrue(session.should_stop())
# This set of tests verifies the session behavior when exceptions are raised
# from code inside a "with MonitoredSession:" context.
def test_stop_cleanly_when_no_exception_in_with_body(self):
# Tests that the session stops and closes cleanly when the with-body exits normally.
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
session = tf.train.MonitoredSession()
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Should have closed.
self.assertTrue(session.should_stop())
self.assertTrue(session._is_closed())
def test_raises_regular_exceptions_in_with_body(self):
# Tests that regular exceptions in "with body" are seen outside.
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
session = tf.train.MonitoredSession()
# We should see that exception.
with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Will be visible outside the "with body".
raise RuntimeError('regular exception')
# Should have closed.
self.assertTrue(session.should_stop())
self.assertTrue(session._is_closed())
def test_graph(self):
g = tf.Graph()
with g.as_default():
session = tf.train.MonitoredSession()
self.assertEqual(g, session.graph)
if __name__ == '__main__':
tf.test.main()
|
test_creator.py
|
from __future__ import absolute_import, unicode_literals
import difflib
import gc
import logging
import os
import stat
import subprocess
import sys
from itertools import product
from threading import Thread
import pytest
import six
from virtualenv.__main__ import run
from virtualenv.create.creator import DEBUG_SCRIPT, Creator, get_env_debug_info
from virtualenv.discovery.builtin import get_interpreter
from virtualenv.discovery.py_info import PythonInfo
from virtualenv.info import IS_PYPY, fs_supports_symlink
from virtualenv.pyenv_cfg import PyEnvCfg
from virtualenv.run import run_via_cli, session_via_cli
from virtualenv.util.path import Path
CURRENT = PythonInfo.current_system()
@pytest.mark.parametrize("sep", [i for i in (os.pathsep, os.altsep) if i is not None])
def test_os_path_sep_not_allowed(tmp_path, capsys, sep):
target = "{}{}".format(str(tmp_path / "a"), "{}b".format(sep))
err = _non_success_exit_code(capsys, target)
msg = (
"destination {!r} must not contain the path separator ({}) as this"
" would break the activation scripts".format(target, sep)
)
assert msg in err, err
def _non_success_exit_code(capsys, target):
with pytest.raises(SystemExit) as context:
run_via_cli(args=[target])
assert context.value.code != 0
out, err = capsys.readouterr()
assert not out, out
return err
def test_destination_exists_file(tmp_path, capsys):
target = tmp_path / "out"
target.write_text("")
err = _non_success_exit_code(capsys, str(target))
msg = "the destination {} already exists and is a file".format(str(target))
assert msg in err, err
@pytest.mark.skipif(sys.platform == "win32", reason="no chmod on Windows")
def test_destination_not_write_able(tmp_path, capsys):
target = tmp_path
prev_mod = target.stat().st_mode
target.chmod(0o444)
try:
err = _non_success_exit_code(capsys, str(target))
msg = "the destination . is not write-able at {}".format(str(target))
assert msg in err, err
finally:
target.chmod(prev_mod)
def cleanup_sys_path(paths):
from virtualenv.create.creator import HERE
paths = [Path(os.path.abspath(i)) for i in paths]
to_remove = [Path(HERE)]
if os.environ.get(str("PYCHARM_HELPERS_DIR")):
to_remove.append(Path(os.environ[str("PYCHARM_HELPERS_DIR")]).parent)
to_remove.append(Path(os.path.expanduser("~")) / ".PyCharm")
result = [i for i in paths if not any(str(i).startswith(str(t)) for t in to_remove)]
return result
@pytest.fixture(scope="session")
def system():
return get_env_debug_info(Path(CURRENT.system_executable), DEBUG_SCRIPT)
CURRENT_CREATORS = list(i for i in CURRENT.creators().key_to_class.keys() if i != "builtin")
_VENV_BUG_ON = (
IS_PYPY
and CURRENT.version_info[0:3] == (3, 6, 9)
and CURRENT.pypy_version_info[0:2] == [7, 3]
and CURRENT.platform == "linux"
)
@pytest.mark.parametrize(
"creator, method, isolated",
[
pytest.param(
*i,
marks=pytest.mark.xfail(
reason="https://bitbucket.org/pypy/pypy/issues/3159/pypy36-730-venv-fails-with-copies-on-linux",
strict=True,
)
)
if _VENV_BUG_ON and i[0] == "venv" and i[1] == "copies"
else i
for i in product(
CURRENT_CREATORS, (["copies"] + (["symlinks"] if fs_supports_symlink() else [])), ["isolated", "global"]
)
],
)
def test_create_no_seed(python, creator, isolated, system, coverage_env, special_name_dir, method):
dest = special_name_dir
cmd = [
"-v",
"-v",
"-p",
six.ensure_text(python),
six.ensure_text(str(dest)),
"--without-pip",
"--activators",
"",
"--creator",
creator,
"--{}".format(method),
]
if isolated == "global":
cmd.append("--system-site-packages")
result = run_via_cli(cmd)
coverage_env()
if IS_PYPY:
# pypy cleans up file descriptors periodically so our (many) subprocess calls impact file descriptor limits
# force a cleanup of these on systems where the limit is low-ish (e.g. macOS's 256)
gc.collect()
content = list(result.creator.purelib.iterdir())
assert not content, "\n".join(six.ensure_text(str(i)) for i in content)
assert result.creator.env_name == six.ensure_text(dest.name)
debug = result.creator.debug
sys_path = cleanup_sys_path(debug["sys"]["path"])
system_sys_path = cleanup_sys_path(system["sys"]["path"])
our_paths = set(sys_path) - set(system_sys_path)
our_paths_repr = "\n".join(six.ensure_text(repr(i)) for i in our_paths)
# ensure we have at least one extra path added
assert len(our_paths) >= 1, our_paths_repr
# ensure all additional paths are related to the virtual environment
for path in our_paths:
msg = "\n{}\ndoes not start with {}\nhas:\n{}".format(
six.ensure_text(str(path)),
six.ensure_text(str(dest)),
"\n".join(six.ensure_text(str(p)) for p in system_sys_path),
)
assert str(path).startswith(str(dest)), msg
# ensure there's at least a site-packages folder as part of the virtual environment added
assert any(p for p in our_paths if p.parts[-1] == "site-packages"), our_paths_repr
# ensure the global site package is added or not, depending on flag
last_from_system_path = next(j for j in reversed(system_sys_path) if str(j).startswith(system["sys"]["prefix"]))
if isolated == "isolated":
assert last_from_system_path not in sys_path, "last from system sys path {} is in venv sys path:\n{}".format(
six.ensure_text(str(last_from_system_path)), "\n".join(six.ensure_text(str(j)) for j in sys_path)
)
else:
common = []
for left, right in zip(reversed(system_sys_path), reversed(sys_path)):
if left == right:
common.append(left)
else:
break
def list_to_str(iterable):
return [six.ensure_text(str(i)) for i in iterable]
assert common, "\n".join(difflib.unified_diff(list_to_str(sys_path), list_to_str(system_sys_path)))
@pytest.mark.skipif(not CURRENT.has_venv, reason="requires interpreter with venv")
def test_venv_fails_not_inline(tmp_path, capsys, mocker):
def _session_via_cli(args, options=None):
session = session_via_cli(args, options)
assert session.creator.can_be_inline is False
return session
mocker.patch("virtualenv.run.session_via_cli", side_effect=_session_via_cli)
before = tmp_path.stat().st_mode
cfg_path = tmp_path / "pyvenv.cfg"
cfg_path.write_text(six.ensure_text(""))
cfg = str(cfg_path)
try:
os.chmod(cfg, stat.S_IREAD | stat.S_IRGRP | stat.S_IROTH)
cmd = ["-p", str(CURRENT.executable), str(tmp_path), "--without-pip", "--creator", "venv"]
with pytest.raises(SystemExit) as context:
run(cmd)
assert context.value.code != 0
finally:
os.chmod(cfg, before)
out, err = capsys.readouterr()
assert "subprocess call failed for" in out, out
assert "Error:" in err, err
@pytest.mark.skipif(not sys.version_info[0] == 2, reason="python 2 only tests")
def test_debug_bad_virtualenv(tmp_path):
cmd = [str(tmp_path), "--without-pip"]
result = run_via_cli(cmd)
# if the site.py is removed/altered the debug should fail as no one is around to fix the paths
site_py = result.creator.stdlib / "site.py"
site_py.unlink()
# replace it with something that writes to stdout/stderr and then raises
site_py.write_text('import sys; sys.stdout.write(repr("std-out")); sys.stderr.write("std-err"); raise ValueError')
debug_info = result.creator.debug
assert debug_info["returncode"]
assert debug_info["err"].startswith("std-err")
assert "std-out" in debug_info["out"]
assert debug_info["exception"]
@pytest.mark.parametrize("creator", CURRENT_CREATORS)
@pytest.mark.parametrize("clear", [True, False], ids=["clear", "no_clear"])
def test_create_clear_resets(tmp_path, creator, clear, caplog):
caplog.set_level(logging.DEBUG)
if creator == "venv" and clear is False:
pytest.skip("venv without clear might fail")
marker = tmp_path / "magic"
cmd = [str(tmp_path), "--seeder", "app-data", "--without-pip", "--creator", creator, "-vvv"]
run_via_cli(cmd)
marker.write_text("") # if we a marker file this should be gone on a clear run, remain otherwise
assert marker.exists()
run_via_cli(cmd + (["--clear"] if clear else []))
assert marker.exists() is not clear
@pytest.mark.parametrize("creator", CURRENT_CREATORS)
@pytest.mark.parametrize("prompt", [None, "magic"])
def test_prompt_set(tmp_path, creator, prompt):
cmd = [str(tmp_path), "--seeder", "app-data", "--without-pip", "--creator", creator]
if prompt is not None:
cmd.extend(["--prompt", "magic"])
result = run_via_cli(cmd)
actual_prompt = tmp_path.name if prompt is None else prompt
cfg = PyEnvCfg.from_file(result.creator.pyenv_cfg.path)
if prompt is None:
assert "prompt" not in cfg
else:
if creator != "venv":
assert "prompt" in cfg, list(cfg.content.keys())
assert cfg["prompt"] == actual_prompt
@pytest.fixture(scope="session")
def cross_python(is_inside_ci):
spec = "{}{}".format(CURRENT.implementation, 2 if CURRENT.version_info.major == 3 else 3)
interpreter = get_interpreter(spec)
if interpreter is None:
msg = "could not find {}".format(spec)
if is_inside_ci:
raise RuntimeError(msg)
pytest.skip(msg=msg)
yield interpreter
@pytest.mark.slow
def test_cross_major(cross_python, coverage_env, tmp_path, current_fastest):
cmd = [
"-v",
"-v",
"-p",
six.ensure_text(cross_python.executable),
six.ensure_text(str(tmp_path)),
"--no-seed",
"--activators",
"",
"--creator",
current_fastest,
]
result = run_via_cli(cmd)
coverage_env()
env = PythonInfo.from_exe(str(result.creator.exe))
assert env.version_info.major != CURRENT.version_info.major
def test_create_parallel(tmp_path, monkeypatch):
monkeypatch.setenv(str("VIRTUALENV_OVERRIDE_APP_DATA"), str(tmp_path))
def create(count):
subprocess.check_call([sys.executable, "-m", "virtualenv", str(tmp_path / "venv{}".format(count))])
threads = [Thread(target=create, args=(i,)) for i in range(1, 4)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def test_creator_input_passed_is_abs(tmp_path, monkeypatch):
monkeypatch.chdir(tmp_path)
result = Creator.validate_dest("venv")
assert str(result) == str(tmp_path / "venv")
def test_create_long_path(current_fastest, tmp_path):
if sys.platform == "darwin":
max_shebang_length = 512
else:
max_shebang_length = 127
# filenames can be at most 255 characters long on macOS, so split into two levels
count = max_shebang_length - len(str(tmp_path))
folder = tmp_path / ("a" * (count // 2)) / ("b" * (count // 2)) / "c"
folder.mkdir(parents=True)
cmd = [str(folder)]
result = run_via_cli(cmd)
subprocess.check_call([str(result.creator.script("pip")), "--version"])
|
MAC_Table_Cisco_Access_Layer_Threaded.py
|
#!/usr/bin/python3
import threading, os, time, sys ,socket, json
from multiprocessing import Queue
from getpass import getpass
from netmiko import ConnectHandler
from netmiko.ssh_exception import NetMikoTimeoutException
from paramiko.ssh_exception import SSHException
from netmiko.ssh_exception import AuthenticationException
HOST_IP={}
# Define username and password to login to all routers with
USER = 'cisco'
PASSWORD = 'Cisco123'
# Define Switches IPs
Switches=[]
with open('IPaddress_Switches.txt') as f:
for line in f:
line = line.strip()
try:
socket.inet_aton(line)
Switches.append(line)
except socket.error:
print ("Invalid IP address " + line)
print ("This is Switches IPs: \n")
print (Switches)
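# The loop above assumes IPaddress_Switches.txt contains one management IP per
# line, for example (illustrative values only):
#   10.0.0.1
#   10.0.0.2
# Lines that fail socket.inet_aton() validation are reported and skipped.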
os.chdir("output/")
def ssh_session(switch, output_q):
# Place what you want each thread to do here, for example connect to SSH, run a command, get output
output_dict = {}
# hostname = switch
switch_d = {'device_type': 'cisco_ios', 'ip': switch, 'username': USER, 'password': PASSWORD,'timeout':15}
# SSH Iteration
try:
ssh_session = ConnectHandler(**switch_d)
except (AuthenticationException):
print("Wrong Authentication >>> "+ (switch_d['ip']) + "\n")
pass
except (NetMikoTimeoutException):
print("Timeout >>> "+ (switch_d['ip']) + "\n")
pass
except (EOFError):
print("EOF Error >>> "+ (switch_d['ip']) + "\n")
pass
except (SSHException):
print("Error SSH Exception >>> "+ (switch_d['ip']) + "\n")
pass
except Exception as unknown_error:
print("Unkown Error >>> "+ (switch_d['ip']) + "\n")
pass
else:
# Get the device hostname
hostname = ssh_session.send_command('show run | inc host')
hostname = hostname[9:]
hostname = hostname.split(" ")[0] # keep only the hostname token
# Make Dictionary
HOST_IP[hostname]=switch
# Extract Trunk Interfaces
Trunk_Int_List = []
Trunk_output_1 = ssh_session.send_command("show int status | inc trunk")
for line in Trunk_output_1.splitlines():
line_1 = line[:9].strip() # interface name sits in the first columns
Trunk_Int_List.append(line_1)
Trunk_Int_List_join = "|".join(Trunk_Int_List)
Trunk_Int_List_join = Trunk_Int_List_join.replace(" ","")
#print (Trunk_Int_List_join)
# Put the Output into a List to use it later with threading
output = ssh_session.send_command("show mac address | exclude All |" + Trunk_Int_List_join)
output_dict[hostname] = output
output_q.put(output_dict)
f = open((hostname), "w")
print((output), file=f) # python 3.x
f = open((hostname+"Trunk"), "w")
print(("show mac address | exclude All |" + Trunk_Int_List_join), file=f) # python 3.x
if __name__ == "__main__":
output_q = Queue()
# Start a thread for each switch in the Switches list, then wait for all of
# them to finish; joining inside the start loop would serialize the sessions.
threads = []
for switch in Switches:
my_thread = threading.Thread(target=ssh_session, args=(switch, output_q))
my_thread.start()
threads.append(my_thread)
for my_thread in threads:
my_thread.join()
# Save the hostname-to-IP dictionary to a file.
host_ip_json = json.dumps(HOST_IP) # avoid shadowing the json module
f = open("dict.json", "w+")
f.write(host_ip_json)
f.close()
sys.exit(0)
|
debug.py
|
# -*- coding: utf-8 -*-
"""
debug.py - Functions to aid in debugging
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
"""
from __future__ import print_function
import sys, traceback, time, gc, re, types, weakref, inspect, os, cProfile, pstats, threading
from . import ptime
from numpy import ndarray
from .Qt import QtCore, QtGui
from .util.mutex import Mutex
from .util import cprint
__ftraceDepth = 0
def ftrace(func):
"""Decorator used for marking the beginning and end of function calls.
Automatically indents nested calls.
"""
def w(*args, **kargs):
global __ftraceDepth
pfx = " " * __ftraceDepth
print(pfx + func.__name__ + " start")
__ftraceDepth += 1
try:
rv = func(*args, **kargs)
finally:
__ftraceDepth -= 1
print(pfx + func.__name__ + " done")
return rv
return w
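# Minimal usage sketch for the ftrace decorator above (illustrative comment, not
# part of the original module); the function names are placeholders:
#
#   @ftrace
#   def outer():
#       inner()
#
#   @ftrace
#   def inner():
#       pass
#
#   outer()   # prints "outer start", indented "inner start"/"inner done", "outer done"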
class Tracer(object):
"""
Prints every function enter/exit. Useful for debugging crashes / lockups.
"""
def __init__(self):
self.count = 0
self.stack = []
def trace(self, frame, event, arg):
self.count += 1
# If it has been a long time since we saw the top of the stack,
# print a reminder
if self.count % 1000 == 0:
print("----- current stack: -----")
for line in self.stack:
print(line)
if event == 'call':
line = " " * len(self.stack) + ">> " + self.frameInfo(frame)
print(line)
self.stack.append(line)
elif event == 'return':
self.stack.pop()
line = " " * len(self.stack) + "<< " + self.frameInfo(frame)
print(line)
if len(self.stack) == 0:
self.count = 0
return self.trace
def stop(self):
sys.settrace(None)
def start(self):
sys.settrace(self.trace)
def frameInfo(self, fr):
filename = fr.f_code.co_filename
funcname = fr.f_code.co_name
lineno = fr.f_lineno
callfr = sys._getframe(3)
callline = "%s %d" % (callfr.f_code.co_name, callfr.f_lineno)
args, _, _, value_dict = inspect.getargvalues(fr)
if len(args) and args[0] == 'self':
instance = value_dict.get('self', None)
if instance is not None:
cls = getattr(instance, '__class__', None)
if cls is not None:
funcname = cls.__name__ + "." + funcname
return "%s: %s %s: %s" % (callline, filename, lineno, funcname)
def warnOnException(func):
"""Decorator which catches/ignores exceptions and prints a stack trace."""
def w(*args, **kwds):
try:
func(*args, **kwds)
except:
printExc('Ignored exception:')
return w
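# Hedged usage sketch for warnOnException (illustrative only): the decorated
# callback logs the traceback via printExc instead of propagating the error.
#
#   @warnOnException
#   def on_button_clicked():
#       raise ValueError("boom")   # printed as "Ignored exception:", then swallowed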
def getExc(indent=4, prefix='| ', skip=1):
lines = (traceback.format_stack()[:-skip]
+ [" ---- exception caught ---->\n"]
+ traceback.format_tb(sys.exc_info()[2])
+ traceback.format_exception_only(*sys.exc_info()[:2]))
lines2 = []
for l in lines:
lines2.extend(l.strip('\n').split('\n'))
lines3 = [" "*indent + prefix + l for l in lines2]
return '\n'.join(lines3)
def printExc(msg='', indent=4, prefix='|'):
"""Print an error message followed by an indented exception backtrace
(This function is intended to be called within except: blocks)"""
exc = getExc(indent, prefix + ' ', skip=2)
print("[%s] %s\n" % (time.strftime("%H:%M:%S"), msg))
print(" "*indent + prefix + '='*30 + '>>')
print(exc)
print(" "*indent + prefix + '='*30 + '<<')
def printTrace(msg='', indent=4, prefix='|'):
"""Print an error message followed by an indented stack trace"""
trace = backtrace(1)
#exc = getExc(indent, prefix + ' ')
print("[%s] %s\n" % (time.strftime("%H:%M:%S"), msg))
print(" "*indent + prefix + '='*30 + '>>')
for line in trace.split('\n'):
print(" "*indent + prefix + " " + line)
print(" "*indent + prefix + '='*30 + '<<')
def backtrace(skip=0):
return ''.join(traceback.format_stack()[:-(skip+1)])
def listObjs(regex='Q', typ=None):
"""List all objects managed by python gc with class name matching regex.
Finds 'Q...' classes by default."""
if typ is not None:
return [x for x in gc.get_objects() if isinstance(x, typ)]
else:
return [x for x in gc.get_objects() if re.match(regex, type(x).__name__)]
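# Example calls for listObjs (a sketch, not from the original source):
#
#   listObjs()                    # gc-tracked objects whose type name starts with 'Q'
#   listObjs(regex='ndarray')     # objects whose type name matches 'ndarray'
#   listObjs(typ=dict)            # every dict instance known to the garbage collector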
def findRefPath(startObj, endObj, maxLen=8, restart=True, seen={}, path=None, ignore=None):
"""Determine all paths of object references from startObj to endObj"""
refs = []
if path is None:
path = [endObj]
if ignore is None:
ignore = {}
ignore[id(sys._getframe())] = None
ignore[id(path)] = None
ignore[id(seen)] = None
prefix = " "*(8-maxLen)
#print prefix + str(map(type, path))
prefix += " "
if restart:
#gc.collect()
seen.clear()
gc.collect()
newRefs = [r for r in gc.get_referrers(endObj) if id(r) not in ignore]
ignore[id(newRefs)] = None
#fo = allFrameObjs()
#newRefs = []
#for r in gc.get_referrers(endObj):
#try:
#if r not in fo:
#newRefs.append(r)
#except:
#newRefs.append(r)
for r in newRefs:
#print prefix+"->"+str(type(r))
if type(r).__name__ in ['frame', 'function', 'listiterator']:
#print prefix+" FRAME"
continue
try:
if any([r is x for x in path]):
#print prefix+" LOOP", objChainString([r]+path)
continue
except:
print(r)
print(path)
raise
if r is startObj:
refs.append([r])
print(refPathString([startObj]+path))
continue
if maxLen == 0:
#print prefix+" END:", objChainString([r]+path)
continue
## See if we have already searched this node.
## If not, recurse.
tree = None
try:
cache = seen[id(r)]
if cache[0] >= maxLen:
tree = cache[1]
for p in tree:
print(refPathString(p+path))
except KeyError:
pass
ignore[id(tree)] = None
if tree is None:
tree = findRefPath(startObj, r, maxLen-1, restart=False, path=[r]+path, ignore=ignore)
seen[id(r)] = [maxLen, tree]
## integrate any returned results
if len(tree) == 0:
#print prefix+" EMPTY TREE"
continue
else:
for p in tree:
refs.append(p+[r])
#seen[id(r)] = [maxLen, refs]
return refs
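# Hedged usage sketch for findRefPath (illustrative; the object names are
# placeholders): trace which reference chains keep a supposedly-deleted object
# alive, e.g. from an application object down to a leaked widget.
#
#   paths = findRefPath(app, leakedWidget, maxLen=6)
#   # matching chains are also printed via refPathString() as they are found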
def objString(obj):
"""Return a short but descriptive string for any object"""
try:
if type(obj) in [int, float]:
return str(obj)
elif isinstance(obj, dict):
if len(obj) > 5:
return "<dict {%s,...}>" % (",".join(list(obj.keys())[:5]))
else:
return "<dict {%s}>" % (",".join(list(obj.keys())))
elif isinstance(obj, str):
if len(obj) > 50:
return '"%s..."' % obj[:50]
else:
return obj[:]
elif isinstance(obj, ndarray):
return "<ndarray %s %s>" % (str(obj.dtype), str(obj.shape))
elif hasattr(obj, '__len__'):
if len(obj) > 5:
return "<%s [%s,...]>" % (type(obj).__name__, ",".join([type(o).__name__ for o in obj[:5]]))
else:
return "<%s [%s]>" % (type(obj).__name__, ",".join([type(o).__name__ for o in obj]))
else:
return "<%s %s>" % (type(obj).__name__, obj.__class__.__name__)
except:
return str(type(obj))
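# Illustrative examples of objString output (a sketch; exact strings depend on
# the object passed in):
#
#   objString(3.5)               -> "3.5"
#   objString({'a': 1, 'b': 2})  -> "<dict {a,b}>"
#   objString("x" * 100)         -> '"xxx..."' (string truncated to 50 characters)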
def refPathString(chain):
"""Given a list of adjacent objects in a reference path, print the 'natural' path
names (ie, attribute names, keys, and indexes) that follow from one object to the next ."""
s = objString(chain[0])
i = 0
while i < len(chain)-1:
#print " -> ", i
i += 1
o1 = chain[i-1]
o2 = chain[i]
cont = False
if isinstance(o1, list) or isinstance(o1, tuple):
if any([o2 is x for x in o1]):
s += "[%d]" % o1.index(o2)
continue
#print " not list"
if isinstance(o2, dict) and hasattr(o1, '__dict__') and o2 == o1.__dict__:
i += 1
if i >= len(chain):
s += ".__dict__"
continue
o3 = chain[i]
for k in o2:
if o2[k] is o3:
s += '.%s' % k
cont = True
continue
#print " not __dict__"
if isinstance(o1, dict):
try:
if o2 in o1:
s += "[key:%s]" % objString(o2)
continue
except TypeError:
pass
for k in o1:
if o1[k] is o2:
s += "[%s]" % objString(k)
cont = True
continue
#print " not dict"
#for k in dir(o1): ## Not safe to request attributes like this.
#if getattr(o1, k) is o2:
#s += ".%s" % k
#cont = True
#continue
#print " not attr"
if cont:
continue
s += " ? "
sys.stdout.flush()
return s
def objectSize(obj, ignore=None, verbose=False, depth=0, recursive=False):
"""Guess how much memory an object is using"""
ignoreTypes = ['MethodType', 'UnboundMethodType', 'BuiltinMethodType', 'FunctionType', 'BuiltinFunctionType']
ignoreTypes = [getattr(types, key) for key in ignoreTypes if hasattr(types, key)]
ignoreRegex = re.compile('(method-wrapper|Flag|ItemChange|Option|Mode)')
if ignore is None:
ignore = {}
indent = ' '*depth
try:
hash(obj)
hsh = obj
except:
hsh = "%s:%d" % (str(type(obj)), id(obj))
if hsh in ignore:
return 0
ignore[hsh] = 1
try:
size = sys.getsizeof(obj)
except TypeError:
size = 0
if isinstance(obj, ndarray):
try:
size += len(obj.data)
except:
pass
if recursive:
if type(obj) in [list, tuple]:
if verbose:
print(indent+"list:")
for o in obj:
s = objectSize(o, ignore=ignore, verbose=verbose, depth=depth+1)
if verbose:
print(indent+' +', s)
size += s
elif isinstance(obj, dict):
if verbose:
print(indent+"list:")
for k in obj:
s = objectSize(obj[k], ignore=ignore, verbose=verbose, depth=depth+1)
if verbose:
print(indent+' +', k, s)
size += s
#elif isinstance(obj, QtCore.QObject):
#try:
#childs = obj.children()
#if verbose:
#print indent+"Qt children:"
#for ch in childs:
#s = objectSize(obj, ignore=ignore, verbose=verbose, depth=depth+1)
#size += s
#if verbose:
#print indent + ' +', ch.objectName(), s
#except:
#pass
#if isinstance(obj, types.InstanceType):
gc.collect()
if verbose:
print(indent+'attrs:')
for k in dir(obj):
if k in ['__dict__']:
continue
o = getattr(obj, k)
if type(o) in ignoreTypes:
continue
strtyp = str(type(o))
if ignoreRegex.search(strtyp):
continue
#if isinstance(o, types.ObjectType) and strtyp == "<type 'method-wrapper'>":
#continue
#if verbose:
#print indent, k, '?'
refs = [r for r in gc.get_referrers(o) if type(r) != types.FrameType]
if len(refs) == 1:
s = objectSize(o, ignore=ignore, verbose=verbose, depth=depth+1)
size += s
if verbose:
print(indent + " +", k, s)
#else:
#if verbose:
#print indent + ' -', k, len(refs)
return size
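# Hedged usage sketch for objectSize (not part of the original module). The
# result is a best-effort estimate, so treat the numbers as indicative only:
#
#   import numpy as np
#   a = np.zeros(1000)
#   objectSize(a)                         # sys.getsizeof(a) plus len(a.data)
#   objectSize({'x': a}, recursive=True)  # also descends into container members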
class GarbageWatcher(object):
"""
Convenient dictionary for holding weak references to objects.
Mainly used to check whether the objects have been collected yet or not.
Example:
gw = GarbageWatcher()
gw['objName'] = obj
gw['objName2'] = obj2
gw.check()
"""
def __init__(self):
self.objs = weakref.WeakValueDictionary()
self.allNames = []
def add(self, obj, name):
self.objs[name] = obj
self.allNames.append(name)
def __setitem__(self, name, obj):
self.add(obj, name)
def check(self):
"""Print a list of all watched objects and whether they have been collected."""
gc.collect()
dead = self.allNames[:]
alive = []
for k in self.objs:
dead.remove(k)
alive.append(k)
print("Deleted objects:", dead)
print("Live objects:", alive)
def __getitem__(self, item):
return self.objs[item]
class Profiler(object):
"""Simple profiler allowing measurement of multiple time intervals.
By default, profilers are disabled. To enable profiling, set the
environment variable `PYQTGRAPHPROFILE` to a comma-separated list of
fully-qualified names of profiled functions.
Calling a profiler registers a message (defaulting to an increasing
counter) that contains the time elapsed since the last call. When the
profiler is about to be garbage-collected, the messages are passed to the
outer profiler if one is running, or printed to stdout otherwise.
If `delayed` is set to False, messages are immediately printed instead.
Example:
def function(...):
profiler = Profiler()
... do stuff ...
profiler('did stuff')
... do other stuff ...
profiler('did other stuff')
# profiler is garbage-collected and flushed at function end
If this function is a method of class C, setting `PYQTGRAPHPROFILE` to
"C.function" (without the module name) will enable this profiler.
For regular functions, use the qualified name of the function, stripping
only the initial "pyqtgraph." prefix from the module.
"""
_profilers = os.environ.get("PYQTGRAPHPROFILE", None)
_profilers = _profilers.split(",") if _profilers is not None else []
_depth = 0
_msgs = []
disable = False # set this flag to disable all or individual profilers at runtime
class DisabledProfiler(object):
def __init__(self, *args, **kwds):
pass
def __call__(self, *args):
pass
def finish(self):
pass
def mark(self, msg=None):
pass
_disabledProfiler = DisabledProfiler()
def __new__(cls, msg=None, disabled='env', delayed=True):
"""Optionally create a new profiler based on caller's qualname.
"""
if disabled is True or (disabled == 'env' and len(cls._profilers) == 0):
return cls._disabledProfiler
# determine the qualified name of the caller function
caller_frame = sys._getframe(1)
try:
caller_object_type = type(caller_frame.f_locals["self"])
except KeyError: # we are in a regular function
qualifier = caller_frame.f_globals["__name__"].split(".", 1)[1]
else: # we are in a method
qualifier = caller_object_type.__name__
func_qualname = qualifier + "." + caller_frame.f_code.co_name
if disabled == 'env' and func_qualname not in cls._profilers: # don't do anything
return cls._disabledProfiler
# create an actual profiling object
cls._depth += 1
obj = super(Profiler, cls).__new__(cls)
obj._name = msg or func_qualname
obj._delayed = delayed
obj._markCount = 0
obj._finished = False
obj._firstTime = obj._lastTime = ptime.time()
obj._newMsg("> Entering " + obj._name)
return obj
def __call__(self, msg=None):
"""Register or print a new message with timing information.
"""
if self.disable:
return
if msg is None:
msg = str(self._markCount)
self._markCount += 1
newTime = ptime.time()
self._newMsg(" %s: %0.4f ms",
msg, (newTime - self._lastTime) * 1000)
self._lastTime = newTime
def mark(self, msg=None):
self(msg)
def _newMsg(self, msg, *args):
msg = " " * (self._depth - 1) + msg
if self._delayed:
self._msgs.append((msg, args))
else:
self.flush()
print(msg % args)
def __del__(self):
self.finish()
def finish(self, msg=None):
"""Add a final message; flush the message list if no parent profiler.
"""
if self._finished or self.disable:
return
self._finished = True
if msg is not None:
self(msg)
self._newMsg("< Exiting %s, total time: %0.4f ms",
self._name, (ptime.time() - self._firstTime) * 1000)
type(self)._depth -= 1
if self._depth < 1:
self.flush()
def flush(self):
if self._msgs:
print("\n".join([m[0]%m[1] for m in self._msgs]))
type(self)._msgs = []
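# Sketch of enabling the Profiler above from the shell (illustrative names):
#
#   PYQTGRAPHPROFILE="MyWidget.paintEvent,functions.makeARGB" python myscript.py
#
# Only profilers whose qualified name appears in that list are activated; all
# other call sites receive the shared _disabledProfiler instance.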
def profile(code, name='profile_run', sort='cumulative', num=30):
"""Common-use for cProfile"""
cProfile.run(code, name)
stats = pstats.Stats(name)
stats.sort_stats(sort)
stats.print_stats(num)
return stats
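# Example invocation of the profile() helper above (illustrative; the code
# string is a placeholder). Note that cProfile.run executes the string, so any
# names it references must be resolvable when it runs:
#
#   profile("some_module.expensive_function()", name="expensive.prof", num=20)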
#### Code for listing (nearly) all objects in the known universe
#### http://utcc.utoronto.ca/~cks/space/blog/python/GetAllObjects
# Recursively expand slist's objects
# into olist, using seen to track
# already processed objects.
def _getr(slist, olist, first=True):
i = 0
for e in slist:
oid = id(e)
typ = type(e)
if oid in olist or typ is int: ## or e in olist: ## since we're excluding all ints, there is no longer a need to check for olist keys
continue
olist[oid] = e
if first and (i%1000) == 0:
gc.collect()
tl = gc.get_referents(e)
if tl:
_getr(tl, olist, first=False)
i += 1
# The public function.
def get_all_objects():
"""Return a list of all live Python objects (excluding int and long), not including the list itself."""
gc.collect()
gcl = gc.get_objects()
olist = {}
_getr(gcl, olist)
del olist[id(olist)]
del olist[id(gcl)]
del olist[id(sys._getframe())]
return olist
def lookup(oid, objects=None):
"""Return an object given its ID, if it exists."""
if objects is None:
objects = get_all_objects()
return objects[oid]
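# Sketch of pairing get_all_objects() with lookup() (illustrative, not from the
# original source): take one snapshot, then resolve individual ids against it.
#
#   objs = get_all_objects()
#   some_id = next(iter(objs))
#   lookup(some_id, objects=objs)   # returns the live object with that id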
class ObjTracker(object):
"""
Tracks all objects under the sun, reporting the changes between snapshots: what objects are created, deleted, and persistent.
This class is very useful for tracking memory leaks. The class goes to great (but not heroic) lengths to avoid tracking
its own internal objects.
Example:
ot = ObjTracker() # takes snapshot of currently existing objects
... do stuff ...
ot.diff() # prints lists of objects created and deleted since ot was initialized
... do stuff ...
ot.diff() # prints lists of objects created and deleted since last call to ot.diff()
# also prints list of items that were created since initialization AND have not been deleted yet
# (if done correctly, this list can tell you about objects that were leaked)
arrays = ot.findPersistent('ndarray') ## returns all objects matching 'ndarray' (string match, not instance checking)
## that were considered persistent when the last diff() was run
describeObj(arrays[0]) ## See if we can determine who has references to this array
"""
allObjs = {} ## keep track of all objects created and stored within class instances
allObjs[id(allObjs)] = None
def __init__(self):
self.startRefs = {} ## list of objects that exist when the tracker is initialized {oid: weakref}
## (If it is not possible to weakref the object, then the value is None)
self.startCount = {}
self.newRefs = {} ## list of objects that have been created since initialization
self.persistentRefs = {} ## list of objects considered 'persistent' when the last diff() was called
self.objTypes = {}
ObjTracker.allObjs[id(self)] = None
self.objs = [self.__dict__, self.startRefs, self.startCount, self.newRefs, self.persistentRefs, self.objTypes]
self.objs.append(self.objs)
for v in self.objs:
ObjTracker.allObjs[id(v)] = None
self.start()
def findNew(self, regex):
"""Return all objects matching regex that were considered 'new' when the last diff() was run."""
return self.findTypes(self.newRefs, regex)
def findPersistent(self, regex):
"""Return all objects matching regex that were considered 'persistent' when the last diff() was run."""
return self.findTypes(self.persistentRefs, regex)
def start(self):
"""
Remember the current set of objects as the comparison for all future calls to diff()
Called automatically on init, but can be called manually as well.
"""
refs, count, objs = self.collect()
for r in self.startRefs:
self.forgetRef(self.startRefs[r])
self.startRefs.clear()
self.startRefs.update(refs)
for r in refs:
self.rememberRef(r)
self.startCount.clear()
self.startCount.update(count)
#self.newRefs.clear()
#self.newRefs.update(refs)
def diff(self, **kargs):
"""
Compute all differences between the current object set and the reference set.
Print a set of reports for created, deleted, and persistent objects
"""
refs, count, objs = self.collect() ## refs contains the list of ALL objects
## Which refs have disappeared since call to start() (these are only displayed once, then forgotten.)
delRefs = {}
for i in list(self.startRefs.keys()):
if i not in refs:
delRefs[i] = self.startRefs[i]
del self.startRefs[i]
self.forgetRef(delRefs[i])
for i in list(self.newRefs.keys()):
if i not in refs:
delRefs[i] = self.newRefs[i]
del self.newRefs[i]
self.forgetRef(delRefs[i])
#print "deleted:", len(delRefs)
## Which refs have appeared since call to start() or diff()
persistentRefs = {} ## created since start(), but before last diff()
createRefs = {} ## created since last diff()
for o in refs:
if o not in self.startRefs:
if o not in self.newRefs:
createRefs[o] = refs[o] ## object has been created since last diff()
else:
persistentRefs[o] = refs[o] ## object has been created since start(), but before last diff() (persistent)
#print "new:", len(newRefs)
## self.newRefs holds the entire set of objects created since start()
for r in self.newRefs:
self.forgetRef(self.newRefs[r])
self.newRefs.clear()
self.newRefs.update(persistentRefs)
self.newRefs.update(createRefs)
for r in self.newRefs:
self.rememberRef(self.newRefs[r])
#print "created:", len(createRefs)
## self.persistentRefs holds all objects considered persistent.
self.persistentRefs.clear()
self.persistentRefs.update(persistentRefs)
print("----------- Count changes since start: ----------")
c1 = count.copy()
for k in self.startCount:
c1[k] = c1.get(k, 0) - self.startCount[k]
typs = list(c1.keys())
typs.sort(key=lambda a: c1[a])
for t in typs:
if c1[t] == 0:
continue
num = "%d" % c1[t]
print(" " + num + " "*(10-len(num)) + str(t))
print("----------- %d Deleted since last diff: ------------" % len(delRefs))
self.report(delRefs, objs, **kargs)
print("----------- %d Created since last diff: ------------" % len(createRefs))
self.report(createRefs, objs, **kargs)
print("----------- %d Created since start (persistent): ------------" % len(persistentRefs))
self.report(persistentRefs, objs, **kargs)
def __del__(self):
self.startRefs.clear()
self.startCount.clear()
self.newRefs.clear()
self.persistentRefs.clear()
del ObjTracker.allObjs[id(self)]
for v in self.objs:
del ObjTracker.allObjs[id(v)]
@classmethod
def isObjVar(cls, o):
return type(o) is cls or id(o) in cls.allObjs
def collect(self):
print("Collecting list of all objects...")
gc.collect()
objs = get_all_objects()
frame = sys._getframe()
del objs[id(frame)] ## ignore the current frame
del objs[id(frame.f_code)]
ignoreTypes = [int]
refs = {}
count = {}
for k in objs:
o = objs[k]
typ = type(o)
oid = id(o)
if ObjTracker.isObjVar(o) or typ in ignoreTypes:
continue
try:
ref = weakref.ref(o)
except:
ref = None
refs[oid] = ref
typ = type(o)
typStr = typeStr(o)
self.objTypes[oid] = typStr
ObjTracker.allObjs[id(typStr)] = None
count[typ] = count.get(typ, 0) + 1
print("All objects: %d Tracked objects: %d" % (len(objs), len(refs)))
return refs, count, objs
def forgetRef(self, ref):
if ref is not None:
del ObjTracker.allObjs[id(ref)]
def rememberRef(self, ref):
## Record the address of the weakref object so it is not included in future object counts.
if ref is not None:
ObjTracker.allObjs[id(ref)] = None
def lookup(self, oid, ref, objs=None):
if ref is None or ref() is None:
try:
obj = lookup(oid, objects=objs)
except:
obj = None
else:
obj = ref()
return obj
def report(self, refs, allobjs=None, showIDs=False):
if allobjs is None:
allobjs = get_all_objects()
count = {}
rev = {}
for oid in refs:
obj = self.lookup(oid, refs[oid], allobjs)
if obj is None:
typ = "[del] " + self.objTypes[oid]
else:
typ = typeStr(obj)
if typ not in rev:
rev[typ] = []
rev[typ].append(oid)
c = count.get(typ, [0,0])
count[typ] = [c[0]+1, c[1]+objectSize(obj)]
typs = list(count.keys())
typs.sort(key=lambda a: count[a][1])
for t in typs:
line = " %d\t%d\t%s" % (count[t][0], count[t][1], t)
if showIDs:
line += "\t"+",".join(map(str,rev[t]))
print(line)
def findTypes(self, refs, regex):
allObjs = get_all_objects()
ids = {}
objs = []
r = re.compile(regex)
for k in refs:
if r.search(self.objTypes[k]):
objs.append(self.lookup(k, refs[k], allObjs))
return objs
def describeObj(obj, depth=4, path=None, ignore=None):
"""
Trace all reference paths backward, printing a list of different ways this object can be accessed.
Attempts to answer the question "who has a reference to this object"
"""
if path is None:
path = [obj]
if ignore is None:
ignore = {} ## holds IDs of objects used within the function.
ignore[id(sys._getframe())] = None
ignore[id(path)] = None
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
printed=False
for ref in refs:
if id(ref) in ignore:
continue
if id(ref) in list(map(id, path)):
print("Cyclic reference: " + refPathString([ref]+path))
printed = True
continue
newPath = [ref]+path
if len(newPath) >= depth:
refStr = refPathString(newPath)
if '[_]' not in refStr: ## ignore '_' references generated by the interactive shell
print(refStr)
printed = True
else:
describeObj(ref, depth, newPath, ignore)
printed = True
if not printed:
print("Dead end: " + refPathString(path))
def typeStr(obj):
"""Create a more useful type string by making <instance> types report their class."""
typ = type(obj)
if typ == getattr(types, 'InstanceType', None):
return "<instance of %s>" % obj.__class__.__name__
else:
return str(typ)
def searchRefs(obj, *args):
"""Pseudo-interactive function for tracing references backward.
**Arguments:**
obj: The initial object from which to start searching
args: A set of string or int arguments.
each integer selects one of obj's referrers to be the new 'obj'
each string indicates an action to take on the current 'obj':
t: print the types of obj's referrers
l: print the lengths of obj's referrers (if they have __len__)
i: print the IDs of obj's referrers
o: print obj
ro: return obj
rr: return list of obj's referrers
Examples::
searchRefs(obj, 't') ## Print types of all objects referring to obj
searchRefs(obj, 't', 0, 't') ## ..then select the first referrer and print the types of its referrers
searchRefs(obj, 't', 0, 't', 'l') ## ..also print lengths of the last set of referrers
searchRefs(obj, 0, 1, 'ro') ## Select index 0 from obj's referrer, then select index 1 from the next set of referrers, then return that object
"""
ignore = {id(sys._getframe()): None}
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
refs = [r for r in refs if id(r) not in ignore]
for a in args:
#fo = allFrameObjs()
#refs = [r for r in refs if r not in fo]
if type(a) is int:
obj = refs[a]
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
refs = [r for r in refs if id(r) not in ignore]
elif a == 't':
print(list(map(typeStr, refs)))
elif a == 'i':
print(list(map(id, refs)))
elif a == 'l':
def slen(o):
if hasattr(o, '__len__'):
return len(o)
else:
return None
print(list(map(slen, refs)))
elif a == 'o':
print(obj)
elif a == 'ro':
return obj
elif a == 'rr':
return refs
def allFrameObjs():
"""Return list of frame objects in current stack. Useful if you want to ignore these objects in refernece searches"""
f = sys._getframe()
objs = []
while f is not None:
objs.append(f)
objs.append(f.f_code)
#objs.append(f.f_locals)
#objs.append(f.f_globals)
#objs.append(f.f_builtins)
f = f.f_back
return objs
def findObj(regex):
"""Return a list of objects whose typeStr matches regex"""
allObjs = get_all_objects()
objs = []
r = re.compile(regex)
for i in allObjs:
obj = allObjs[i]
if r.search(typeStr(obj)):
objs.append(obj)
return objs
def listRedundantModules():
"""List modules that have been imported more than once via different paths."""
mods = {}
for name, mod in sys.modules.items():
if not hasattr(mod, '__file__'):
continue
mfile = os.path.abspath(mod.__file__)
if mfile[-1] == 'c':
mfile = mfile[:-1]
if mfile in mods:
print("module at %s has 2 names: %s, %s" % (mfile, name, mods[mfile]))
else:
mods[mfile] = name
def walkQObjectTree(obj, counts=None, verbose=False, depth=0):
"""
Walk through a tree of QObjects, doing nothing to them.
The purpose of this function is to find dead objects and generate a crash
immediately rather than stumbling upon them later.
Prints a count of the objects encountered, for fun. (or is it?)
"""
if verbose:
print(" "*depth + typeStr(obj))
report = False
if counts is None:
counts = {}
report = True
typ = str(type(obj))
try:
counts[typ] += 1
except KeyError:
counts[typ] = 1
for child in obj.children():
walkQObjectTree(child, counts, verbose, depth+1)
return counts
QObjCache = {}
def qObjectReport(verbose=False):
"""Generate a report counting all QObjects and their types"""
global QObjCache
count = {}
for obj in findObj('PyQt'):
if isinstance(obj, QtCore.QObject):
oid = id(obj)
if oid not in QObjCache:
QObjCache[oid] = typeStr(obj) + " " + obj.objectName()
try:
QObjCache[oid] += " " + obj.parent().objectName()
QObjCache[oid] += " " + obj.text()
except:
pass
print("check obj", oid, str(QObjCache[oid]))
if obj.parent() is None:
walkQObjectTree(obj, count, verbose)
typs = list(count.keys())
typs.sort()
for t in typs:
print(count[t], "\t", t)
class PrintDetector(object):
"""Find code locations that print to stdout."""
def __init__(self):
self.stdout = sys.stdout
sys.stdout = self
def remove(self):
sys.stdout = self.stdout
def __del__(self):
self.remove()
def write(self, x):
self.stdout.write(x)
traceback.print_stack()
def flush(self):
self.stdout.flush()
def listQThreads():
"""Prints Thread IDs (Qt's, not OS's) for all QThreads."""
thr = findObj('[Tt]hread')
thr = [t for t in thr if isinstance(t, QtCore.QThread)]
import sip
for t in thr:
print("--> ", t)
print(" Qt ID: 0x%x" % sip.unwrapinstance(t))
def pretty(data, indent=''):
"""Format nested dict/list/tuple structures into a more human-readable string
This function is a bit better than pprint for displaying OrderedDicts.
"""
ret = ""
ind2 = indent + " "
if isinstance(data, dict):
ret = indent+"{\n"
for k, v in data.items():
ret += ind2 + repr(k) + ": " + pretty(v, ind2).strip() + "\n"
ret += indent+"}\n"
elif isinstance(data, list) or isinstance(data, tuple):
s = repr(data)
if len(s) < 40:
ret += indent + s
else:
if isinstance(data, list):
d = '[]'
else:
d = '()'
ret = indent+d[0]+"\n"
for i, v in enumerate(data):
ret += ind2 + str(i) + ": " + pretty(v, ind2).strip() + "\n"
ret += indent+d[1]+"\n"
else:
ret += indent + repr(data)
return ret
class ThreadTrace(object):
"""
Used to debug freezing by starting a new thread that reports on the
location of other threads periodically.
"""
def __init__(self, interval=10.0):
self.interval = interval
self.lock = Mutex()
self._stop = False
self.start()
def stop(self):
with self.lock:
self._stop = True
def start(self, interval=None):
if interval is not None:
self.interval = interval
self._stop = False
self.thread = threading.Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
def run(self):
while True:
with self.lock:
if self._stop is True:
return
print("\n============= THREAD FRAMES: ================")
for id, frame in sys._current_frames().items():
if id == threading.current_thread().ident:
continue
print("<< thread %d >>" % id)
traceback.print_stack(frame)
print("===============================================\n")
time.sleep(self.interval)
class ThreadColor(object):
"""
Wrapper on stdout/stderr that colors text by the current thread ID.
*stream* must be 'stdout' or 'stderr'.
"""
colors = {}
lock = Mutex()
def __init__(self, stream):
self.stream = getattr(sys, stream)
self.err = stream == 'stderr'
setattr(sys, stream, self)
def write(self, msg):
with self.lock:
cprint.cprint(self.stream, self.color(), msg, -1, stderr=self.err)
def flush(self):
with self.lock:
self.stream.flush()
def color(self):
tid = threading.current_thread()
if tid not in self.colors:
c = (len(self.colors) % 15) + 1
self.colors[tid] = c
return self.colors[tid]
def enableFaulthandler():
""" Enable faulthandler for all threads.
If the faulthandler package is available, this function disables and then
re-enables fault handling for all threads (this is necessary to ensure any
new threads are handled correctly), and returns True.
If faulthandler is not available, then returns False.
"""
try:
import faulthandler
# necessary to disable first or else new threads may not be handled.
faulthandler.disable()
faulthandler.enable(all_threads=True)
return True
except ImportError:
return False
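# --- Usage sketch (not part of the original module; names below are illustrative) ---
# A minimal example of how the leak-hunting helpers above are typically combined:
# snapshot, create suspect objects, diff, then trace referrers.
if __name__ == '__main__':
    class Suspect(object):
        pass
    tracker = ObjTracker()               # snapshot of currently live objects
    leaked = [Suspect() for _ in range(3)]
    tracker.diff()                       # reports the new Suspect instances
    suspects = tracker.findNew('Suspect')
    if suspects:
        describeObj(suspects[0])         # who still holds a reference?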
|
search_ip.py
|
#!/usr/bin/env python
# coding:utf-8
# author:Wisdom_Tree
# search for alive IP addresses using threads
from threading import Thread,activeCount
import os
def cmd(ip,ip_live):
#print(ip)
cmd=os.popen("ping "+ip+' -n 1 -w 10')
#time.sleep(0.030)
re=cmd.read()
re=re.find('TTL')
if re!=(-1):
ip_live.append(ip)
print(ip+" alive")
def search_all_ip(ip,ip_live):
# print(ip)
# if activeCount()<200:
ping=Thread(target=cmd,args=(ip,ip_live))
ping.start()
ping.join()
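# --- Usage sketch (assumption: probing the 192.168.1.0/24 subnet) ----------
# The prefix below is illustrative; adjust it to the network being scanned.
# Two caveats about the code above: cmd() uses Windows-style ping flags
# (-n/-w), so it only works as-is on Windows, and search_all_ip() joins each
# ping thread immediately, which makes the scan effectively sequential.
if __name__ == '__main__':
    ip_live = []
    for host in range(1, 255):
        search_all_ip('192.168.1.' + str(host), ip_live)
    print(str(len(ip_live)) + ' hosts alive')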
|
image_app_core.py
|
"""The flask/webserver part is slightly independent of the behavior,
allowing the user to "tune in" to see, but should not stop the
robot running"""
import time
from multiprocessing import Process, Queue
from flask import Flask, render_template, Response
app = Flask(__name__)
control_queue = Queue()
display_queue = Queue(maxsize=2)
display_template = 'image_server'
@app.route('/')
def index():
return render_template(display_template)
def frame_generator():
"""This is our main video feed"""
while True:
# at most 20 fps
time.sleep(0.05)
# Get (wait until we have data)
encoded_bytes = display_queue.get()
# Need to turn this into http multipart data.
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + encoded_bytes + b'\r\n')
@app.route('/display')
def display():
return Response(frame_generator(),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/control/<control_name>')
def control(control_name):
control_queue.put(control_name)
return Response('queued')
def start_server_process(template_name):
"""Start the process, call .terminate to close it"""
global display_template
display_template = template_name
# app.debug=True
# app.use_reloader = False
server = Process(target=app.run, kwargs={"host": "0.0.0.0", "port": 5001})
server.daemon = True
server.start()
return server
def put_output_image(encoded_bytes):
"""Queue an output image"""
if display_queue.empty():
display_queue.put(encoded_bytes)
def get_control_instruction():
"""Get control instructions from the web app, if any"""
if control_queue.empty():
# nothing
return None
else:
return control_queue.get()
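# --- Usage sketch (illustrative only) ---------------------------------------
# A behaviour process would typically start the server, then alternate between
# pushing encoded JPEG frames and polling for control commands. The template
# name and capture_encoded_frame() are assumptions standing in for the real
# camera code that produces JPEG bytes.
if __name__ == '__main__':
    server = start_server_process('image_server.html')
    try:
        while True:
            # put_output_image(capture_encoded_frame())  # hypothetical helper
            instruction = get_control_instruction()
            if instruction == 'exit':
                break
            time.sleep(0.05)
    finally:
        server.terminate()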
|
thread_rlock.py
|
###################################
# File Name : thread_rlock.py
###################################
#!/usr/bin/python3
import time
import logging
import threading
logging.basicConfig(level=logging.DEBUG, format="(%(threadName)s) %(message)s")
RESOURCE = 0
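# Why RLock and not Lock: set_reverse() acquires the lock and, while still
# holding it, calls set_one()/set_zero(), which acquire the same lock again.
# A plain threading.Lock would deadlock on that second acquire; an RLock lets
# the owning thread re-acquire it, which is exactly what this demo shows.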
def set_reverse(lock):
logging.debug("Start batch")
with lock:
logging.debug("Grab lock!")
if RESOURCE == 0:
set_one(lock, True)
else:
set_zero(lock, True)
logging.debug("Reversed")
def set_zero(lock, end=False):
logging.debug("Start set zero")
while True:
with lock:
global RESOURCE
RESOURCE = 0
logging.debug("Grab lock and set RESOURCE to %d." % RESOURCE)
time.sleep(0.5)
time.sleep(1)
if end:
break
def set_one(lock, end=False):
logging.debug("Start set one")
while True:
with lock:
global RESOURCE
RESOURCE = 1
logging.debug("Grab lock and set RESOURCE to %d." % RESOURCE)
time.sleep(0.5)
time.sleep(1)
if end:
break
def main():
lock = threading.RLock()
zero = threading.Thread(target=set_zero, name="zero", args=(lock,))
zero.setDaemon(True)
zero.start()
one = threading.Thread(target=set_one, name="one", args=(lock,))
one.setDaemon(True)
one.start()
time.sleep(6)
reverse = threading.Thread(target=set_reverse, name="reverse", args=(lock,))
reverse.start()
if __name__ == "__main__":
main()
|
Client.py
|
# © 2021 Liran Smadja. All rights reserved.
import time
import socket
import threading
from Observable import Observable
from Crypto.Cipher import AES
from Protocol import *
# encryption key
SECRET_KEY = b'\xf8[\xd6\t<\xd8\x04a5siif\x93\xdc\xe0'
IV = b'\x8e;\xf21bB\x0c\x95\x93\xce\xe9J3,\x04\xdd'
class ClientTCP(Observable):
def __init__(self):
Observable.__init__(self)
self.server_ip = "167.172.181.78"
self.server_port = 5678
self.max_msg_length = 2048
self.client_socket = None
self.db_waiting_response = True
self.isTerminated = False
self.client_db_info = {}
def setup(self) -> None:
"""
Initialize Client TCP socket protocol.
Initialize connection to server.
:return: None, otherwise raise an error.
"""
try:
self.notify("GRAPHICS_LOAD")
# server connection
self.notify("CONNECT")
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client_socket.connect((self.server_ip, self.server_port))
self.notify("CONNECTED")
# client waiting indefinitely to receive messages
threading.Thread(target=self.recv_msg).start()
self.notify("CLIENT_DB_CONNECT")
while self.db_waiting_response:
self.send_msg(PROTOCOLS["database_status"], "")
time.sleep(1)
except ConnectionError:
self.notify("TIMEOUT")
def recv_msg(self) -> None:
"""
Receive messages from server.
Runs indefinitely in a separate thread.
:return: None
"""
while not self.isTerminated:
try:
encrypted_data = self.client_socket.recv(self.max_msg_length)
decrypted_data = decryptTransmission(encrypted_data)
self.serverTransmission(self.client_socket, decrypted_data)
except ConnectionAbortedError:
self.isTerminated = True
self.notify(PROTOCOLS["server_offline"], "")
except ConnectionResetError:
self.notify(PROTOCOLS["server_offline"], "")
def send_msg(self, cmd: typing.AnyStr, msg: typing.AnyStr) -> None:
"""
Send message to server.
:param cmd: Identify by protocol with (command)
:param msg: String that contains the message.
:return: None
"""
# if client socket is not closed
if not self.isTerminated:
encryptTransmission(self.client_socket, build_message(cmd, msg))
def serverTransmission(self, client_socket: socket, message) -> None:
"""
Receive message from server that contains (command) to follow.
:param client_socket: Client (socket) obj.
:param message: String message.
:return: None.
"""
cmd = message[0]
msg = message[1]
if cmd == "DB_CONNECTION_STATUS":
if msg == "ALIVE":
self.notify("CLIENT_DB_CONNECTED")
else:
self.notify("DB_CONNECTION_ERROR")
self.db_waiting_response = False
if cmd == "CLIENT_INFO":
client_data = split_data(msg, 8)
self.client_db_info["id"] = client_data[0]
self.client_db_info["username"] = client_data[1]
self.client_db_info["password"] = client_data[2]
self.client_db_info["online"] = client_data[3]
self.client_db_info["ip_address"] = client_data[4]
self.client_db_info["avatar"] = client_data[5]
self.client_db_info["status"] = client_data[6]
self.client_db_info["room"] = client_data[7]
self.client_db_info["color"] = client_data[8]
if cmd == "LOGIN_OK":
self.notify("LOGIN_OK")
if cmd == "LOGIN_ERROR":
self.notify("LOGIN_ERROR")
if cmd == "ONLINE_USERS":
self.notify("ONLINE_USERS", msg)
if cmd == "MESSAGE_TO_CLIENT":
self.notify("MESSAGE_TO_CLIENT", msg)
if cmd == "CHAT_ROOMS_NAMES":
self.notify("CHAT_ROOMS_NAMES", msg)
if cmd == "CHAT_ROOMS_INFO":
self.notify("CHAT_ROOMS_INFO", msg)
if cmd == "BOT_USER_LOGGED_IN":
self.notify("BOT_USER_LOGGED_IN", msg)
if cmd == "BOT_USER_LOGGED_OUT":
self.notify("BOT_USER_LOGGED_OUT", msg)
if cmd == "REPLACE_USER_AVATAR":
self.notify("REPLACE_USER_AVATAR", msg)
if cmd == "REPLACE_USERNAME_COLOR":
self.notify("REPLACE_USERNAME_COLOR", msg)
if cmd == "REPLACE_USER_STATUS":
self.notify("REPLACE_USER_STATUS", msg)
if cmd == "REGISTER_USER":
self.notify("REGISTER_USER", msg)
if cmd == "IS_SERVER_RUNNING":
pass
def encryptTransmission(client_socket: socket, msg: typing.AnyStr) -> None:
"""
Encrypt data and send it to server.
:param client_socket: client socket (Socket)
:param msg: parsed message
:return: None
"""
# complete for missing bytes
missing_len = 16 - (len(msg) % 16)
msg += '~' * missing_len
# create encryptor
encryptor = AES.new(SECRET_KEY, AES.MODE_CBC, IV)
# send encoded message --> encrypted message to server.
encoded_message = msg.encode('cp424')
encrypted_message = encryptor.encrypt(encoded_message)
client_socket.send(encrypted_message)
def decryptTransmission(data: typing.AnyStr) -> tuple:
"""
Decrypt data from server.
:param data: encrypted string (bytes)
:return: (cmd, msg) tuple.
"""
# create decrypter
decrypter = AES.new(SECRET_KEY, AES.MODE_CBC, IV)
# decrypt--> decode--> parse data
decrypted_data = decrypter.decrypt(data)
decoded_data = decrypted_data.decode('cp424')
justify_data = decoded_data.replace('~', '')
parsed_data = parse_message(justify_data)
return parsed_data
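# Padding note (worked example): messages are padded with '~' up to the next
# 16-byte AES block boundary in encryptTransmission(), and every '~' is
# stripped again in decryptTransmission(). For a 13-character message,
# 16 - (13 % 16) = 3 padding characters are appended, giving one full block.
# This simple scheme assumes '~' never occurs in the payload itself.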
if __name__ == "__main__":
client = ClientTCP()
client.setup()
# © 2021 Liran Smadja. All rights reserved.
|
trajectory_player.py
|
################################################################################
# Copyright 2022 FZI Research Center for Information Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
from arne_skill_pipeline.msg import State
import threading
import quaternion
class TrajectoryPlayer(object):
""" Publish trajectories to ROS topics for robot control
This mechanism's primary application is to turn a freshly created
trajectory into robot control commands.
The trajectory usually comes from the skill pipeline and is user
parameterized with a duration, which is implicitly given by the timestamps
of each point. Since `cartesian_controllers` are used for robot control,
this player continuously publishes the trajectory as discrete target poses
for the robot's end-effector and its gripper.
"""
def __init__(self, publisher):
""" Initialize the player with an ArNe State publisher
Passing the publisher is important to not become a rosnode ourselves.
"""
self.pub = publisher
self.paused = False
self.stopped = True
def _publish(self, trajectory, done_cb):
""" Publish the trajectory as discrete State topics to ROS
We use a timer object to publish trajectory waypoints in equidistant
time steps. This function returns once the trajectory is finished or
once the player has been stopped.
"""
msg = State()
idx = 0
def finished():
return idx >= trajectory.nr_points or self.stopped
def _do_publish(event):
nonlocal idx
if not self.paused and not finished():
msg.header.stamp = rospy.Time.now()
msg.pose.position.x = trajectory.states[idx][0]
msg.pose.position.y = trajectory.states[idx][1]
msg.pose.position.z = trajectory.states[idx][2]
# There's no plausibility check on the skill's generalized trajectory,
# so normalization is required here for the orientation quaternion.
q = quaternion.quaternion(
trajectory.states[idx][6], # w
trajectory.states[idx][3], # x
trajectory.states[idx][4], # y
trajectory.states[idx][5], # z
).normalized()
msg.pose.orientation.x = q.x
msg.pose.orientation.y = q.y
msg.pose.orientation.z = q.z
msg.pose.orientation.w = q.w
msg.gripper.data = trajectory.states[idx][7]
self.pub.publish(msg)
idx += 1
# Trajectories have equidistant time spacing
period = rospy.Duration(trajectory.duration / trajectory.nr_points)
timer = rospy.Timer(period, _do_publish)
while self.play_thread.is_alive() and not finished():
rospy.sleep(0.1)
timer.shutdown()
# Finish with caller-specified lambda
if done_cb is not None:
done_cb()
def play(self, trajectory, done_cb=None):
""" Play the trajectory for robot control
This function is non-blocking.
Each trajectory starts with the robot's current pose.
The high-level code using this player must make sure that each
trajectory is adequately parameterized before replay.
We require a trajectory on each call to underline this importance and
do not provide any asynchronous load or read methods for this reason.
This method does the following:
- Preempt old trajectories with new ones
- Continuously publish to Ros topics in a separate thread
- Call `done_cb` when finished
"""
# Preempt any playing trajectory.
self.stop()
# Start publishing in a separate thread
self.stopped = False
self.play_thread = threading.Thread(target=self._publish, args=(trajectory, done_cb), daemon=True)
self.play_thread.start()
return self.play_thread.is_alive()
def toggle_pause(self):
""" Pause or unpause the current trajectory replay
"""
self.paused = not self.paused
return True
def stop(self):
""" Stop trajectory replay
"""
if hasattr(self, 'play_thread'):
self.stopped = True
self.paused = False
self.play_thread.join()
return not self.play_thread.is_alive()
else:
return True
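# --- Usage sketch (illustrative; requires a running ROS node) ---------------
# The topic name below is an assumption; the real topic depends on the
# cartesian_controllers setup of the robot.
#
#   pub = rospy.Publisher('/target_state', State, queue_size=10)
#   player = TrajectoryPlayer(pub)
#   player.play(trajectory, done_cb=lambda: rospy.loginfo("replay finished"))
#   ...
#   player.toggle_pause()   # pause / resume the replay
#   player.stop()           # preempt it entirely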
|
jobs.py
|
# BSD 3-Clause License
#
# Copyright (c) 2017, Zackary Parsons
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import multiprocessing
from queue import Queue
from threading import Thread
from time import sleep
from typing import Callable, Tuple
from . import env
from . import progress
_job_queue = Queue() # FIFO job queue
_cpu_count = multiprocessing.cpu_count() # Max threads = _cpu_count
class Job:
target = None # Function to call when running in thread
args = None # Arguments to pass to function
return_val = None # Return value of the target
has_run = False # True if the job has been run
def run(self):
self.return_val = self.target(*self.args)
self.has_run = True
return self.return_val
def __init__(self, target: Callable, args: Tuple = ()):
self.target=target
self.args=args
def _job_worker():
while not _job_queue.empty():
job = _job_queue.get()
job.run()
_job_queue.task_done()
def push_job(new_job: Job):
""" Push a new job into the FIFO queue """
_job_queue.put(new_job)
def start_jobs(max_threads: int = 0):
""" Start running the jobs in the job queue """
global _cpu_count
if max_threads <= 0:
max_threads = _cpu_count
for i in range(max_threads):
t = Thread(target=_job_worker)
t.daemon = True
t.start()
def wait_done(show_progress=True):
""" Wait until all jobs have finished running """
complete_prog_bar = False # If true, set progress to 100% when done
if show_progress and not env.get("VERBOSE"):
complete_prog_bar = True
qsize_init = _job_queue.qsize()
progress.update(0)
while not _job_queue.empty():
prog = (qsize_init - _job_queue.qsize()) / qsize_init
progress.update(prog)
sleep(0.001)
_job_queue.join()
if complete_prog_bar:
progress.update(1)
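# --- Usage sketch (illustrative) ---------------------------------------------
# Typical flow from a caller inside the same package; convert_file() is a
# hypothetical worker function, not part of this module.
#
#   for path in paths:
#       push_job(Job(target=convert_file, args=(path,)))
#   start_jobs()          # spawn up to one worker thread per CPU core
#   wait_done()           # block (updating the progress bar) until the queue drains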
|
pyusb_backend.py
|
# pyOCD debugger
# Copyright (c) 2006-2020 Arm Limited
# Copyright (c) 2020 Patrick Huesmann
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import six
from time import sleep
import platform
import errno
from hashlib import sha1
from base64 import b32encode
from .interface import Interface
from .common import (
USB_CLASS_HID,
filter_device_by_class,
is_known_cmsis_dap_vid_pid,
check_ep,
)
from ..dap_access_api import DAPAccessIntf
from ... import common
LOG = logging.getLogger(__name__)
try:
import usb.core
import usb.util
except:
if platform.system() == "Linux":
LOG.error("PyUSB is required for CMSIS-DAP support on Linux")
IS_AVAILABLE = False
else:
IS_AVAILABLE = True
class PyUSB(Interface):
"""! @brief CMSIS-DAP USB interface class using pyusb for the backend.
"""
isAvailable = IS_AVAILABLE
did_show_no_libusb_warning = False
def __init__(self):
super(PyUSB, self).__init__()
self.ep_out = None
self.ep_in = None
self.dev = None
self.intf_number = None
self.serial_number = None
self.kernel_driver_was_attached = False
self.closed = True
self.thread = None
self.rcv_data = []
self.read_sem = threading.Semaphore(0)
self.packet_size = 64
def open(self):
assert self.closed is True
# Get device handle
dev = usb.core.find(custom_match=FindDap(self.serial_number))
if dev is None:
raise DAPAccessIntf.DeviceError("Device %s not found" % self.serial_number)
# get active config
config = dev.get_active_configuration()
# Get count of HID interfaces and create the matcher object
hid_interface_count = len(list(usb.util.find_descriptor(config, find_all=True, bInterfaceClass=USB_CLASS_HID)))
matcher = MatchCmsisDapv1Interface(hid_interface_count)
# Get CMSIS-DAPv1 interface
interface = usb.util.find_descriptor(config, custom_match=matcher)
if interface is None:
raise DAPAccessIntf.DeviceError("Device %s has no CMSIS-DAPv1 interface" %
self.serial_number)
interface_number = interface.bInterfaceNumber
# Find endpoints
ep_in, ep_out = None, None
for endpoint in interface:
if endpoint.bEndpointAddress & usb.util.ENDPOINT_IN:
ep_in = endpoint
else:
ep_out = endpoint
# Detach kernel driver
self.kernel_driver_was_attached = False
try:
if dev.is_kernel_driver_active(interface_number):
LOG.debug("Detaching Kernel Driver of Interface %d from USB device (VID=%04x PID=%04x).", interface_number, dev.idVendor, dev.idProduct)
dev.detach_kernel_driver(interface_number)
self.kernel_driver_was_attached = True
except (NotImplementedError, usb.core.USBError) as e:
# Some implementations don't have kernel attach/detach
LOG.warning("USB Kernel Driver Detach Failed ([%s] %s). Attached driver may interfere with pyOCD operations.", e.errno, e.strerror)
pass
# Explicitly claim the interface
try:
usb.util.claim_interface(dev, interface_number)
except usb.core.USBError as exc:
raise six.raise_from(DAPAccessIntf.DeviceError("Unable to open device"), exc)
# Update all class variables if we made it here
self.ep_out = ep_out
self.ep_in = ep_in
self.dev = dev
self.intf_number = interface_number
# Start RX thread as the last step
self.closed = False
self.start_rx()
def start_rx(self):
# Flush the RX buffers by reading until timeout exception
try:
while True:
self.ep_in.read(self.ep_in.wMaxPacketSize, 1)
except usb.core.USBError:
# USB timeout expected
pass
# Start RX thread
self.thread = threading.Thread(target=self.rx_task)
self.thread.daemon = True
self.thread.start()
def rx_task(self):
try:
while not self.closed:
self.read_sem.acquire()
if not self.closed:
self.rcv_data.append(self.ep_in.read(self.ep_in.wMaxPacketSize, 10 * 1000))
finally:
# Set last element of rcv_data to None on exit
self.rcv_data.append(None)
@staticmethod
def get_all_connected_interfaces():
"""! @brief Returns all the connected CMSIS-DAP devices.
returns an array of PyUSB (Interface) objects
"""
# find all cmsis-dap devices
try:
all_devices = usb.core.find(find_all=True, custom_match=FindDap())
except usb.core.NoBackendError:
if not PyUSB.did_show_no_libusb_warning:
LOG.warning("CMSIS-DAPv1 probes may not be detected because no libusb library was found.")
PyUSB.did_show_no_libusb_warning = True
return []
# iterate on all devices found
boards = []
for board in all_devices:
new_board = PyUSB()
new_board.vid = board.idVendor
new_board.pid = board.idProduct
new_board.product_name = board.product or hex(board.idProduct)
new_board.vendor_name = board.manufacturer or hex(board.idVendor)
if board.serial_number:
new_board.serial_number = board.serial_number
else:
s = new_board.vendor_name + ',' + new_board.product_name
if board.bus:
s += ',' + str(board.bus)
if board.address:
s += ',' + str(board.address)
new_board.serial_number = b32encode(sha1(s.encode()).digest()).decode('ascii')[:16]
boards.append(new_board)
return boards
def write(self, data):
"""! @brief Write data on the OUT endpoint associated to the HID interface
"""
report_size = self.packet_size
if self.ep_out:
report_size = self.ep_out.wMaxPacketSize
for _ in range(report_size - len(data)):
data.append(0)
self.read_sem.release()
if not self.ep_out:
bmRequestType = 0x21 #Host to device request of type Class of Recipient Interface
bmRequest = 0x09 #Set_REPORT (HID class-specific request for transferring data over EP0)
wValue = 0x200 #Issuing an OUT report
wIndex = self.intf_number #mBed Board interface number for HID
self.dev.ctrl_transfer(bmRequestType, bmRequest, wValue, wIndex, data)
return
self.ep_out.write(data)
def read(self):
"""! @brief Read data on the IN endpoint associated to the HID interface
"""
while len(self.rcv_data) == 0:
sleep(0)
if self.rcv_data[0] is None:
raise DAPAccessIntf.DeviceError("Device %s read thread exited" %
self.serial_number)
return self.rcv_data.pop(0)
def close(self):
"""! @brief Close the interface
"""
assert self.closed is False
LOG.debug("closing interface")
self.closed = True
self.read_sem.release()
self.thread.join()
assert self.rcv_data[-1] is None
self.rcv_data = []
usb.util.release_interface(self.dev, self.intf_number)
if self.kernel_driver_was_attached:
try:
self.dev.attach_kernel_driver(self.intf_number)
except Exception as exception:
LOG.warning('Exception attaching kernel driver: %s',
str(exception))
usb.util.dispose_resources(self.dev)
self.ep_out = None
self.ep_in = None
self.dev = None
self.intf_number = None
self.kernel_driver_was_attached = False
self.thread = None
class MatchCmsisDapv1Interface(object):
"""! @brief Match class for finding CMSIS-DAPv1 interface.
This match class performs several tests on the provided USB interface descriptor, to
determine whether it is a CMSIS-DAPv1 interface. These requirements must be met by the
interface:
1. If there is more than one HID interface on the device, the interface must have an interface
name string containing "CMSIS-DAP".
2. bInterfaceClass must be 0x03 (HID).
3. bInterfaceSubClass must be 0.
4. Must have interrupt in endpoint, with an optional interrupt out endpoint, in that order.
"""
def __init__(self, hid_interface_count):
"""! @brief Constructor."""
self._hid_count = hid_interface_count
def __call__(self, interface):
"""! @brief Return True if this is a CMSIS-DAPv1 interface."""
try:
if self._hid_count > 1:
interface_name = usb.util.get_string(interface.device, interface.iInterface)
# This tells us whether the interface is CMSIS-DAP, but not whether it's v1 or v2.
if (interface_name is None) or ("CMSIS-DAP" not in interface_name):
return False
# Now check the interface class to distinguish v1 from v2.
if (interface.bInterfaceClass != USB_CLASS_HID) \
or (interface.bInterfaceSubClass != 0):
return False
# Must have either 1 or 2 endpoints.
if interface.bNumEndpoints not in (1, 2):
return False
endpoint_attrs = [
(usb.util.endpoint_direction(ep.bEndpointAddress),
usb.util.endpoint_type(ep.bmAttributes))
for ep in interface
]
# Possible combinations of endpoints
ENDPOINT_ATTRS_ALLOWED = [
# One interrupt endpoint IN
[(usb.util.ENDPOINT_IN, usb.util.ENDPOINT_TYPE_INTR)],
# Two interrupt endpoints, first one IN, second one OUT
[(usb.util.ENDPOINT_IN, usb.util.ENDPOINT_TYPE_INTR),
(usb.util.ENDPOINT_OUT, usb.util.ENDPOINT_TYPE_INTR)],
# Two interrupt endpoints, first one OUT, second one IN
[(usb.util.ENDPOINT_OUT, usb.util.ENDPOINT_TYPE_INTR),
(usb.util.ENDPOINT_IN, usb.util.ENDPOINT_TYPE_INTR)],
]
if endpoint_attrs not in ENDPOINT_ATTRS_ALLOWED:
return False
# All checks passed, this is a CMSIS-DAPv1 interface!
return True
except (UnicodeDecodeError, IndexError):
# UnicodeDecodeError exception can be raised if the device has a corrupted interface name.
# Certain versions of STLinkV2 are known to have this problem. If we can't read the
# interface name, there's no way to tell if it's a CMSIS-DAPv1 interface.
#
# IndexError can be raised if an endpoint is missing.
return False
class FindDap(object):
"""! @brief CMSIS-DAP match class to be used with usb.core.find"""
def __init__(self, serial=None):
"""! @brief Create a new FindDap object with an optional serial number"""
self._serial = serial
def __call__(self, dev):
"""! @brief Return True if this is a DAP device, False otherwise"""
# Check if the device class is a valid one for CMSIS-DAP.
if filter_device_by_class(dev.idVendor, dev.idProduct, dev.bDeviceClass):
return False
try:
# First attempt to get the active config. This produces a more direct error
# when you don't have device permissions on Linux
config = dev.get_active_configuration()
# Now read the product name string.
device_string = dev.product
if (device_string is None) or ("CMSIS-DAP" not in device_string):
return False
# Get count of HID interfaces.
hid_interface_count = len(list(usb.util.find_descriptor(config, find_all=True, bInterfaceClass=USB_CLASS_HID)))
# Find the CMSIS-DAPv1 interface.
matcher = MatchCmsisDapv1Interface(hid_interface_count)
cmsis_dap_interface = usb.util.find_descriptor(config, custom_match=matcher)
except usb.core.USBError as error:
if error.errno == errno.EACCES and platform.system() == "Linux":
msg = ("%s while trying to interrogate a USB device "
"(VID=%04x PID=%04x). This can probably be remedied with a udev rule. "
"See <https://github.com/mbedmicro/pyOCD/tree/master/udev> for help." %
(error, dev.idVendor, dev.idProduct))
# If we recognize this device as one that should be CMSIS-DAP, we can raise
# the level of the log message since it's almost certainly a permissions issue.
if is_known_cmsis_dap_vid_pid(dev.idVendor, dev.idProduct):
LOG.warning(msg)
else:
LOG.debug(msg)
else:
LOG.debug("Error accessing USB device (VID=%04x PID=%04x): %s",
dev.idVendor, dev.idProduct, error)
return False
except (IndexError, NotImplementedError, ValueError, UnicodeDecodeError) as error:
LOG.debug("Error accessing USB device (VID=%04x PID=%04x): %s", dev.idVendor, dev.idProduct, error)
return False
if cmsis_dap_interface is None:
return False
if self._serial is not None:
if self._serial == "" and dev.serial_number is None:
return True
if self._serial != dev.serial_number:
return False
return True
|
imageops.py
|
# Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import commands
import hashlib
import urlparse
import requests
import threading
import os
import shutil
from zvmsdk import client as zvmclient
from zvmsdk import config
from zvmsdk import constants
from zvmsdk import database
from zvmsdk import exception
from zvmsdk import utils as zvmutils
from zvmsdk import log
LOG = log.LOG
CONF = config.CONF
_IMAGEOPS = None
_LOCK = threading.Lock()
CHUNKSIZE = 4096
def get_imageops():
global _IMAGEOPS
if _IMAGEOPS is None:
_IMAGEOPS = ImageOps()
return _IMAGEOPS
class ImageOps(object):
def __init__(self):
self.zvmclient = zvmclient.get_zvmclient()
self._pathutils = zvmutils.PathUtils()
self._ImageDbOperator = database.ImageDbOperator()
def image_get_root_disk_size(self, image_name):
return self.zvmclient.image_get_root_disk_size(image_name)
def image_import(self, image_name, url, image_meta, remote_host=None):
if CONF.zvm.client_type == 'xcat':
self.zvmclient.image_import(image_name,
url,
image_meta,
remote_host=remote_host)
else:
self.import_image_to_sdk_imagerepo(image_name, url, image_meta,
remote_host)
def image_query(self, imagekeyword=None):
return self.zvmclient.image_query(imagekeyword)
def image_delete(self, image_name):
return self.zvmclient.image_delete(image_name)
def import_image_to_sdk_imagerepo(self, image_name, url, image_meta,
remote_host=None):
"""Import the image specified in url to SDK image repository, and
create a record in image db, the imported images are located in
image_repository/prov_method/os_version/, for example,
/opt/sdk/images/netboot/rhel7.2/90685d2b-167b.img"""
try:
target = self._get_image_path_by_name(image_name,
image_meta['os_version'], constants.IMAGE_TYPE['DEPLOY'])
self._pathutils.create_import_image_repository(
image_meta['os_version'], constants.IMAGE_TYPE['DEPLOY'])
self._scheme2backend(urlparse.urlparse(url).scheme).image_import(
image_name, url,
image_meta,
remote_host=remote_host)
# Check md5 after import to ensure import a correct image
# TODO change to use query imagename in db
expect_md5sum = image_meta.get('md5sum')
real_md5sum = self._get_md5sum(target)
if expect_md5sum and expect_md5sum != real_md5sum:
err = ("The md5sum after import is not same as source image,"
" the image has been broken")
raise exception.SDKImageImportException(err)
LOG.info("Image %s is import successfully" % image_name)
disk_size_units = self._get_disk_size_units(target)
image_size = self._get_image_size(target)
self._ImageDbOperator.image_add_record(image_name,
image_meta['os_version'],
real_md5sum,
disk_size_units,
image_size)
except KeyError:
raise exception.SDKUnsupportedImageBackend("No backend found for"
" '%s'" % urlparse.urlparse(url).scheme)
except Exception as err:
msg = ("Import image to zvmsdk image repository error due to: %s"
% str(err))
# Cleanup the image from image repository
self._pathutils.remove_file(target)
raise exception.SDKImageImportException(msg=msg)
def _get_disk_size_units(self, image_path):
"""Return a string to indicate disk units in format 3390:CYL or 408200:
BLK"""
command = 'hexdump -n 48 -C %s' % image_path
(rc, output) = commands.getstatusoutput(command)
LOG.debug("hexdump result is %s" % output)
if rc:
msg = ("Error happened when executing command hexdump with"
"reason: %s" % output)
LOG.error(msg)
raise exception.ZVMImageError(msg=msg)
try:
root_disk_size = int(output[144:156])
disk_units = output[220:223]
root_disk_units = ':'.join([str(root_disk_size), disk_units])
except ValueError:
msg = ("Image file at %s is missing built-in disk size "
"metadata, it was probably not captured by SDK" %
image_path)
raise exception.ZVMImageError(msg=msg)
if 'FBA' not in output and 'CKD' not in output:
msg = ("The image's disk type is not valid. Currently we only"
" support FBA and CKD disk")
raise exception.ZVMImageError(msg=msg)
LOG.debug("The image's root_disk_units is %s" % root_disk_units)
return root_disk_units
def _get_image_size(self, image_path):
"""Return disk size in bytes"""
command = 'du -b %s' % image_path
(rc, output) = commands.getstatusoutput(command)
if rc:
msg = ("Error happened when executing command du -b with"
"reason: %s" % output)
LOG.error(msg)
raise exception.ZVMImageError(msg=msg)
size = output.split()[0]
return size
def _get_image_path_by_name(self, image_name, image_os_version, type):
target = '/'.join([CONF.image.sdk_image_repository,
type,
image_os_version,
image_name])
return target
def _scheme2backend(self, scheme):
return {
"file": FilesystemBackend,
"http": HTTPBackend,
# "https": HTTPSBackend
}[scheme]
def _get_md5sum(self, fpath):
"""Calculate the md5sum of the specific image file"""
current_md5 = hashlib.md5()
if isinstance(fpath, basestring) and os.path.exists(fpath):
with open(fpath, "rb") as fh:
for chunk in self._read_chunks(fh):
current_md5.update(chunk)
elif (fpath.__class__.__name__ in ["StringIO", "StringO"] or
isinstance(fpath, file)):
for chunk in self._read_chunks(fpath):
current_md5.update(chunk)
else:
return ""
return current_md5.hexdigest()
def _read_chunks(self, fh):
fh.seek(0)
chunk = fh.read(CHUNKSIZE)
while chunk:
yield chunk
chunk = fh.read(CHUNKSIZE)
else:
fh.seek(0)
class FilesystemBackend(object):
@classmethod
def image_import(cls, image_name, url, image_meta, **kwargs):
"""Import image from remote host to local image repository using scp.
If remote_host is not specified, the source file exists in the local
file system; just copy the image to the image repository
"""
try:
source = urlparse.urlparse(url).path
target = '/'.join([CONF.image.sdk_image_repository,
'netboot',
image_meta['os_version'],
image_name])
if kwargs['remote_host']:
if '@' in kwargs['remote_host']:
source_path = ':'.join([kwargs['remote_host'], source])
command = ' '.join(['/usr/bin/scp', source_path, target])
(rc, output) = commands.getstatusoutput(command)
if rc:
msg = ("Error happened when copying image file with"
"reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageImportException(msg=msg)
else:
msg = ("The specified remote_host %s format invalid" %
kwargs['remote_host'])
LOG.error(msg)
raise exception.SDKImageImportException(msg=msg)
else:
LOG.debug("Remote_host not specified, will copy from local")
shutil.copyfile(source, target)
except Exception as err:
msg = ("Error happened when importing image to SDK"
" image repository with reason: %s" % str(err))
LOG.error(msg)
raise err
class HTTPBackend(object):
@classmethod
def image_import(cls, image_name, url, image_meta, **kwargs):
import_image = MultiThreadDownloader(image_name, url, image_meta)
import_image.run()
class MultiThreadDownloader(threading.Thread):
def __init__(self, image_name, url, image_meta):
super(MultiThreadDownloader, self).__init__()
self.url = url
# Set thread number
self.threadnum = 8
self.name = image_name
self.image_osdistro = image_meta['os_version']
r = requests.head(self.url)
# Get the size of the download resource
self.totalsize = int(r.headers['Content-Length'])
self.target = '/'.join([CONF.image.sdk_image_repository, 'netboot',
self.image_osdistro,
self.name])
def get_range(self):
ranges = []
offset = int(self.totalsize / self.threadnum)
for i in range(self.threadnum):
if i == self.threadnum - 1:
ranges.append((i * offset, ''))
else:
# Get the process range for each thread
ranges.append((i * offset, (i + 1) * offset))
return ranges
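# Worked example: for an 80-byte resource and 8 threads, offset is 10 and
# get_range() returns [(0, 10), (10, 20), ..., (60, 70), (70, '')]; the last
# range is left open-ended so the final thread also picks up any remainder
# lost to the integer division above.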
def download(self, start, end):
headers = {'Range': 'Bytes=%s-%s' % (start, end),
'Accept-Encoding': '*'}
# Get the data
res = requests.get(self.url, headers=headers)
# seek to the right position for writing data
LOG.debug("Downloading file range %s:%s success" % (start, end))
with _LOCK:
self.fd.seek(start)
self.fd.write(res.content)
def run(self):
self.fd = open(self.target, 'w')
thread_list = []
n = 0
for ran in self.get_range():
start, end = ran
LOG.debug('thread %d start:%s,end:%s' % (n, start, end))
n += 1
# Open thread
thread = threading.Thread(target=self.download, args=(start, end))
thread.start()
thread_list.append(thread)
for i in thread_list:
i.join()
LOG.debug('Download %s success' % (self.name))
self.fd.close()
|
_client.py
|
from statefun_tasks import DefaultSerialiser, PipelineBuilder, TaskRequest, TaskResult, TaskException, TaskAction, \
TaskActionRequest, TaskActionResult, TaskActionException, TaskStatus as TaskStatusProto
from statefun_tasks.client import TaskError, TaskStatus
from google.protobuf.any_pb2 import Any
from kafka import KafkaProducer, KafkaConsumer, TopicPartition
import logging
from uuid import uuid4
from threading import Thread
import asyncio
from concurrent.futures import Future
_log = logging.getLogger('FlinkTasks')
class FlinkTasksClient(object):
"""
Client for submitting TaskRequests / TaskActionRequests
Replies are handled on a dedicated thread created per instance so FlinkTasksClientFactory.get_client() is preferred to
instantiating this class directly.
:param kafka_broker_url: url of the kafka broker used for ingress and egress
:param request_topics: dictionary of worker to ingress topic mappings (use None for default)
e.g. {'example/worker': 'example.requests', None: 'example.default.requests'}
:param action_topics: as per request_topics but used for action requests
:param reply_topic: topic to listen on for responses (a unique consumer group id will be created)
:param optional group_id: kafka group id to use when subscribing to reply_topic
:param optional serialiser: serialiser to use (will use DefaultSerialiser if not set)
"""
def __init__(self, kafka_broker_url, request_topics, action_topics, reply_topic, group_id=None, serialiser=None):
self._kafka_broker_url = kafka_broker_url
self._requests = {}
self._request_topics = request_topics
self._action_topics = action_topics
self._reply_topic = reply_topic
self._group_id = group_id
self._serialiser = serialiser if serialiser is not None else DefaultSerialiser()
self._producer = KafkaProducer(bootstrap_servers=[kafka_broker_url])
self._consumer = KafkaConsumer(
self._reply_topic,
bootstrap_servers=[self._kafka_broker_url],
auto_offset_reset='earliest',
group_id=self._group_id)
self._consumer_thread = Thread(target=self._consume, args=())
self._consumer_thread.daemon = True
self._consumer_thread.start()
@staticmethod
def _get_request_key(item):
if isinstance(item, (TaskRequest, TaskResult, TaskException)):
return f'request.{item.id}'
elif isinstance(item, (TaskActionRequest, TaskActionResult, TaskActionException)):
return f'action.{item.action}.{item.id}'
else:
raise ValueError(f'Unsupported request type {type(item)}')
@staticmethod
def _try_get_topic_for(pipeline: PipelineBuilder, topics, topic=None):
if topic is not None:
return topic
destination = pipeline.get_inital_destination()
if destination in topics:
return topics[destination]
if None in topics:
return topics[None]
return None
def _get_request_topic(self, pipeline: PipelineBuilder, topic=None):
topic = self._try_get_topic_for(pipeline, self._request_topics, topic)
if topic is None:
raise ValueError(f'Could not find a topic to send this request to')
else:
return topic
def _get_action_topic(self, pipeline: PipelineBuilder, topic=None):
topic = self._try_get_topic_for(pipeline, self._action_topics, topic)
if topic is None:
raise ValueError(f'Could not find a topic to send this action to')
else:
return topic
def _submit_request(self, request, topic):
request_id = self._get_request_key(request)
# if we have already subscribed then don't subscribe again
future = self._requests.get(request_id, None)
if future is not None:
return future
# else create new future for this request
future = Future()
self._requests[request_id] = future
request.reply_topic = self._reply_topic
key = request.id.encode('utf-8')
val = request.SerializeToString()
self._producer.send(topic=topic, key=key, value=val)
self._producer.flush()
return future
def _submit_action(self, task_id, action, topic):
task_action = TaskActionRequest(id=task_id, action=action, reply_topic=self._reply_topic)
return self._submit_request(task_action, topic)
def submit(self, pipeline: PipelineBuilder, topic=None) -> Future:
"""
Submit a pipeline to Flink
:param pipeline: the pipeline
:param optional topic: override the default ingress topic
:return: a Future encapsulating the result of the pipeline
"""
task_request = pipeline.to_task_request(self._serialiser)
topic = self._get_request_topic(pipeline, topic)
return self._submit_request(task_request, topic)
async def submit_async(self, pipeline: PipelineBuilder, topic=None):
"""
Submit a pipeline to Flink
:param pipeline: the pipeline
:param optional topic: override the default ingress topic
:return: the result of the pipeline
"""
return await asyncio.wrap_future(self.submit(pipeline, topic=topic))
def get_status(self, pipeline: PipelineBuilder, topic=None) -> Future:
"""
Get the status of a previously submitted pipeline
:param pipeline: the pipeline
:param optional topic: override the default ingress topic
:return: a Future encapsulating the status of the pipeline
"""
topic = self._get_action_topic(pipeline, topic)
return self._submit_action(pipeline.id, TaskAction.GET_STATUS, topic)
async def get_status_async(self, pipeline: PipelineBuilder, topic=None):
"""
Get the status of a previously submitted pipeline
:param pipeline: the pipeline
:param optional topic: override the default ingress topic
:return: the status of the pipeline
"""
return await asyncio.wrap_future(self.get_status(pipeline, topic))
def get_request(self, pipeline: PipelineBuilder, topic=None) -> Future:
"""
Get the original TaskRequest for a previously submitted pipeline
:param pipeline: the pipeline
:param optional topic: override the default ingress topic
:return: a Future encapsulating the original TaskRequest
"""
topic = self._get_action_topic(pipeline, topic)
return self._submit_action(pipeline.id, TaskAction.GET_REQUEST, topic)
async def get_request_async(self, pipeline: PipelineBuilder, topic=None):
"""
Get the original TaskRequest for a previously submitted pipeline
:param pipeline: the pipeline
:param optional topic: override the default ingress topic
:return: the original TaskRequest
"""
return await asyncio.wrap_future(self.get_request(pipeline, topic))
def get_result(self, pipeline: PipelineBuilder, topic=None) -> Future:
"""
Get the TaskResult for a previously submitted pipeline
:param pipeline: the pipeline
:param optional topic: override the default ingress topic
:return: a Future encapsulating the original TaskResult
"""
topic = self._get_action_topic(pipeline, topic)
return self._submit_action(pipeline.id, TaskAction.GET_RESULT, topic)
async def get_result_async(self, pipeline: PipelineBuilder, topic=None):
"""
Get the TaskResult for a previously submitted pipeline
:param pipeline: the pipeline
:param optional topic: override the default ingress topic
:return: the original TaskResult
"""
return await asyncio.wrap_future(self.get_result(pipeline, topic))
def _consume(self):
while True:
try:
for message in self._consumer:
_log.info(f'Message received - {message}')
any = Any()
any.ParseFromString(message.value)
if any.Is(TaskException.DESCRIPTOR):
self._raise_exception(any, TaskException)
elif any.Is(TaskResult.DESCRIPTOR):
self._return_result(any, TaskResult)
elif any.Is(TaskActionException.DESCRIPTOR):
self._raise_exception(any, TaskActionException)
elif any.Is(TaskActionResult.DESCRIPTOR):
self._return_action_result(any, TaskActionResult)
except Exception as ex:
_log.warning(f'Exception in consumer thread - {ex}', exc_info=ex)
@staticmethod
def _unpack(any_proto: Any, proto_type):
proto = proto_type()
any_proto.Unpack(proto)
return proto
def _unpack_with_future(self, any_proto: Any, proto_type):
proto = self._unpack(any_proto, proto_type)
request_id = self._get_request_key(proto)
future = self._requests.get(request_id, None)
if future is not None:
del self._requests[request_id]
return proto, future
return None, None
def _return_action_result(self, any_proto: Any, proto_type):
proto, future = self._unpack_with_future(any_proto, proto_type)
if future is not None:
try:
if proto.action == TaskAction.GET_STATUS:
future.set_result(TaskStatus(self._unpack(proto.result, TaskStatusProto).status))
elif proto.action == TaskAction.GET_REQUEST:
future.set_result(self._unpack(proto.result, TaskRequest))
elif proto.action == TaskAction.GET_RESULT:
future.set_result(self._unpack(proto.result, TaskResult))
else:
raise ValueError(f'Unsupported action {TaskAction.Name(proto.action)}')
except Exception as ex:
future.set_exception(ex)
def _return_result(self, any: Any, proto_type):
task_result, future = self._unpack_with_future(any, proto_type)
if future is not None:
try:
result, _ = self._serialiser.deserialise_result(task_result)
future.set_result(result)
except Exception as ex:
future.set_exception(ex)
def _raise_exception(self, any: Any, proto_type):
task_exception, future = self._unpack_with_future(any, proto_type)
if future is not None:
try:
future.set_exception(TaskError(task_exception))
except Exception as ex:
future.set_exception(ex)
class FlinkTasksClientFactory():
"""
Factory for creating memoized FlinkTasksClients
"""
__clients = {}
@staticmethod
def get_client(kafka_broker_url, request_topics: dict, action_topics: dict, reply_topic, serialiser=None) -> FlinkTasksClient:
"""
Creates a FlinkTasksClient for submitting tasks to flink. Clients are memoized by broker url and reply topic.
:param kafka_broker_url: url of the kafka broker used for ingress and egress
:param request_topics: dictionary of worker to ingress topic mappings (use None for default)
e.g. {'example/worker': 'example.requests', None: 'example.default.requests'}
:param action_topics: as per request_topics but used for action requests
:param reply_topic: topic to listen on for responses (a unique consumer group id will be created)
:param optional serialiser: serialiser to use (will use DefaultSerialiser if not set)
"""
key = f'{kafka_broker_url}.{reply_topic}'
if key not in FlinkTasksClientFactory.__clients:
client = FlinkTasksClient(kafka_broker_url, request_topics, action_topics, reply_topic, serialiser=serialiser, group_id=str(uuid4()))
FlinkTasksClientFactory.__clients[key] = client
return FlinkTasksClientFactory.__clients[key]
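# Usage sketch (illustrative only: the broker address, topic names and `pipeline` below are
# hypothetical placeholders, not values defined in this module):
#
#     client = FlinkTasksClientFactory.get_client(
#         'localhost:9092',
#         request_topics={None: 'example.default.requests'},
#         action_topics={None: 'example.default.actions'},
#         reply_topic='example.replies')
#
#     result = client.get_result(pipeline).result()        # blocking access to the Future
#     # or, from asyncio code:
#     # result = await client.get_result_async(pipeline)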
|
servers.py
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for managing server processes required by Oppia."""
from __future__ import annotations
import contextlib
import logging
import os
import re
import shutil
import signal
import subprocess
import sys
import threading
from core import feconf
from core import utils
from scripts import common
@contextlib.contextmanager
def managed_process(
command_args, human_readable_name='Process', shell=False,
timeout_secs=60, **popen_kwargs):
"""Context manager for starting and stopping a process gracefully.
Args:
command_args: list(int|str). A sequence of program arguments, where the
program to execute is the first item. Ints are allowed in order to
accommodate e.g. port numbers.
human_readable_name: str. The human-readable name of the process. Used
by the function's logging logic to improve readability.
shell: bool. Whether the command should be run inside of its own shell.
WARNING: Executing shell commands that incorporate unsanitized input
from an untrusted source makes a program vulnerable to
[shell injection](https://w.wiki/_Ac2), a serious security flaw
which can result in arbitrary command execution. For this reason,
the use of `shell=True` is **strongly discouraged** in cases where
the command string is constructed from external input.
timeout_secs: int. The time allotted for the managed process and its
descendants to terminate themselves. After the timeout, any
remaining processes will be killed abruptly.
**popen_kwargs: dict(str: *). Same kwargs as `subprocess.Popen`.
Yields:
psutil.Process. The process managed by the context manager.
"""
# TODO(#11549): Move this to top of the file.
if common.PSUTIL_DIR not in sys.path:
sys.path.insert(1, common.PSUTIL_DIR)
import psutil
get_proc_info = lambda p: (
'%s(name="%s", pid=%d)' % (human_readable_name, p.name(), p.pid)
if p.is_running() else '%s(pid=%d)' % (human_readable_name, p.pid))
stripped_args = (('%s' % arg).strip() for arg in command_args)
non_empty_args = (s for s in stripped_args if s)
command = ' '.join(non_empty_args) if shell else list(non_empty_args)
human_readable_command = command if shell else ' '.join(command)
msg = 'Starting new %s: %s' % (human_readable_name, human_readable_command)
print(msg)
popen_proc = psutil.Popen(command, shell=shell, **popen_kwargs)
try:
yield popen_proc
finally:
print('Stopping %s...' % get_proc_info(popen_proc))
procs_still_alive = [popen_proc]
try:
if popen_proc.is_running():
# Children must be terminated before the parent, otherwise they
# may become zombie processes.
procs_still_alive = (
popen_proc.children(recursive=True) + [popen_proc])
procs_to_kill = []
for proc in procs_still_alive:
if proc.is_running():
logging.info('Terminating %s...' % get_proc_info(proc))
proc.terminate()
procs_to_kill.append(proc)
else:
logging.info('%s has already ended.' % get_proc_info(proc))
procs_gone, procs_still_alive = (
psutil.wait_procs(procs_to_kill, timeout=timeout_secs))
for proc in procs_still_alive:
logging.warning('Forced to kill %s!' % get_proc_info(proc))
proc.kill()
for proc in procs_gone:
logging.info('%s has already ended.' % get_proc_info(proc))
except Exception:
# NOTE: Raising an exception while exiting a context manager is bad
# practice, so we log and suppress exceptions instead.
logging.exception(
'Failed to stop %s gracefully!' % get_proc_info(popen_proc))
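# Example usage (a minimal sketch; any long-running command could be substituted, and
# `do_other_work` is a hypothetical helper):
#
#     with managed_process(['sleep', 30], human_readable_name='Sleeper') as proc:
#         do_other_work(proc.pid)
#     # On exit, the process and its children are terminated, and killed outright if they
#     # are still alive after `timeout_secs`.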
@contextlib.contextmanager
def managed_dev_appserver(
app_yaml_path, env=None, log_level='info',
host='0.0.0.0', port=8080, admin_host='0.0.0.0', admin_port=8000,
enable_host_checking=True, automatic_restart=False,
skip_sdk_update_check=False):
"""Returns a context manager to start up and shut down a GAE dev appserver.
Args:
app_yaml_path: str. Path to the app.yaml file which defines the
structure of the server.
env: dict(str: str) or None. Defines the environment variables for the
new process.
log_level: str. The lowest log level generated by the application code
and the development server. Expected values are: debug, info,
warning, error, critical.
host: str. The host name to which the app server should bind.
port: int. The lowest port to which application modules should bind.
admin_host: str. The host name to which the admin server should bind.
admin_port: int. The port to which the admin server should bind.
enable_host_checking: bool. Whether to enforce HTTP Host checking for
application modules, API server, and admin server. Host checking
protects against DNS rebinding attacks, so only disable after
understanding the security implications.
automatic_restart: bool. Whether to restart instances automatically when
files relevant to their module are changed.
skip_sdk_update_check: bool. Whether to skip checking for SDK updates.
If false, uses .appcfg_nag to decide.
Yields:
psutil.Process. The dev_appserver process.
"""
dev_appserver_args = [
common.CURRENT_PYTHON_BIN,
common.DEV_APPSERVER_PATH,
'--host', host,
'--port', port,
'--admin_host', admin_host,
'--admin_port', admin_port,
'--enable_host_checking', 'true' if enable_host_checking else 'false',
'--automatic_restart', 'true' if automatic_restart else 'false',
'--skip_sdk_update_check', 'true' if skip_sdk_update_check else 'false',
'--log_level', log_level,
'--dev_appserver_log_level', log_level,
app_yaml_path
]
with contextlib.ExitStack() as stack:
# OK to use shell=True here because we are not passing anything that
# came from an untrusted user, only other callers of the script,
# so there's no risk of shell-injection attacks.
proc = stack.enter_context(managed_process(
dev_appserver_args,
human_readable_name='GAE Development Server',
shell=True,
env=env
))
common.wait_for_port_to_be_in_use(port)
yield proc
@contextlib.contextmanager
def managed_firebase_auth_emulator(recover_users=False):
"""Returns a context manager to manage the Firebase auth emulator.
Args:
recover_users: bool. Whether to recover users created by the previous
instance of the Firebase auth emulator.
Yields:
psutil.Process. The Firebase emulator process.
"""
emulator_args = [
common.FIREBASE_PATH, 'emulators:start', '--only', 'auth',
'--project', feconf.OPPIA_PROJECT_ID,
'--config', feconf.FIREBASE_EMULATOR_CONFIG_PATH,
]
emulator_args.extend(
['--import', common.FIREBASE_EMULATOR_CACHE_DIR, '--export-on-exit']
if recover_users else
['--export-on-exit', common.FIREBASE_EMULATOR_CACHE_DIR])
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc_context = managed_process(
emulator_args, human_readable_name='Firebase Emulator', shell=True)
with proc_context as proc:
common.wait_for_port_to_be_in_use(feconf.FIREBASE_EMULATOR_PORT)
yield proc
@contextlib.contextmanager
def managed_elasticsearch_dev_server():
"""Returns a context manager for ElasticSearch server for running tests
in development mode and running a local dev server. This is only required
in a development environment.
Yields:
psutil.Process. The ElasticSearch server process.
"""
# Clear previous data stored in the local cluster.
if os.path.exists(common.ES_PATH_DATA_DIR):
shutil.rmtree(common.ES_PATH_DATA_DIR)
# -q is the quiet flag.
es_args = ['%s/bin/elasticsearch' % common.ES_PATH, '-q']
# Override the default path to ElasticSearch config files.
es_env = {'ES_PATH_CONF': common.ES_PATH_CONFIG_DIR}
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc_context = managed_process(
es_args, human_readable_name='ElasticSearch Server', env=es_env,
shell=True)
with proc_context as proc:
common.wait_for_port_to_be_in_use(feconf.ES_LOCALHOST_PORT)
yield proc
@contextlib.contextmanager
def managed_cloud_datastore_emulator(clear_datastore=False):
"""Returns a context manager for the Cloud Datastore emulator.
Args:
clear_datastore: bool. Whether to delete the datastore's config and data
before starting the emulator.
Yields:
psutil.Process. The emulator process.
"""
emulator_hostport = '%s:%d' % (
feconf.CLOUD_DATASTORE_EMULATOR_HOST,
feconf.CLOUD_DATASTORE_EMULATOR_PORT)
emulator_args = [
common.GCLOUD_PATH, 'beta', 'emulators', 'datastore', 'start',
'--project', feconf.OPPIA_PROJECT_ID,
'--data-dir', common.CLOUD_DATASTORE_EMULATOR_DATA_DIR,
'--host-port', emulator_hostport,
'--consistency=1.0',
'--quiet'
]
if clear_datastore:
emulator_args.append('--no-store-on-disk')
with contextlib.ExitStack() as stack:
data_dir_exists = os.path.exists(
common.CLOUD_DATASTORE_EMULATOR_DATA_DIR)
if clear_datastore and data_dir_exists:
# Replace it with an empty directory.
shutil.rmtree(common.CLOUD_DATASTORE_EMULATOR_DATA_DIR)
os.makedirs(common.CLOUD_DATASTORE_EMULATOR_DATA_DIR)
elif not data_dir_exists:
os.makedirs(common.CLOUD_DATASTORE_EMULATOR_DATA_DIR)
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc = stack.enter_context(managed_process(
emulator_args, human_readable_name='Cloud Datastore Emulator',
shell=True))
common.wait_for_port_to_be_in_use(feconf.CLOUD_DATASTORE_EMULATOR_PORT)
# Environment variables required to communicate with the emulator.
stack.enter_context(common.swap_env(
'DATASTORE_DATASET', feconf.OPPIA_PROJECT_ID))
stack.enter_context(common.swap_env(
'DATASTORE_EMULATOR_HOST', emulator_hostport))
stack.enter_context(common.swap_env(
'DATASTORE_EMULATOR_HOST_PATH', '%s/datastore' % emulator_hostport))
stack.enter_context(common.swap_env(
'DATASTORE_HOST', 'http://%s' % emulator_hostport))
stack.enter_context(common.swap_env(
'DATASTORE_PROJECT_ID', feconf.OPPIA_PROJECT_ID))
stack.enter_context(common.swap_env(
'DATASTORE_USE_PROJECT_ID_AS_APP_ID', 'true'))
stack.enter_context(common.swap_env(
'GOOGLE_CLOUD_PROJECT', feconf.OPPIA_PROJECT_ID))
yield proc
@contextlib.contextmanager
def managed_redis_server():
"""Run the redis server within a context manager that ends it gracefully."""
if common.is_windows_os():
raise Exception(
'The redis command line interface is not installed because your '
'machine is on the Windows operating system. The redis server '
'cannot start.')
# Check if a redis dump file currently exists. This file contains residual
# data from a previous run of the redis server. If it exists, remove the
# dump file so that the redis server starts with a clean slate.
if os.path.exists(common.REDIS_DUMP_PATH):
os.remove(common.REDIS_DUMP_PATH)
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc_context = managed_process(
[common.REDIS_SERVER_PATH, common.REDIS_CONF_PATH],
human_readable_name='Redis Server', shell=True)
with proc_context as proc:
common.wait_for_port_to_be_in_use(feconf.REDISPORT)
try:
yield proc
finally:
subprocess.check_call([common.REDIS_CLI_PATH, 'shutdown', 'nosave'])
def create_managed_web_browser(port):
"""Returns a context manager for a web browser targeting the given port on
localhost. If a web browser cannot be opened on the current system by Oppia,
then returns None instead.
Args:
port: int. The port number to open in the web browser.
Returns:
context manager|None. The context manager to a web browser window, or
None if the current operating system does not support web browsers.
"""
url = 'http://localhost:%s/' % port
human_readable_name = 'Web Browser'
if common.is_linux_os():
if any(re.match('.*VBOX.*', d) for d in os.listdir('/dev/disk/by-id/')):
return None
else:
return managed_process(
['xdg-open', url], human_readable_name=human_readable_name)
elif common.is_mac_os():
return managed_process(
['open', url], human_readable_name=human_readable_name)
else:
return None
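# Example usage (a sketch): because this function may return None, callers should branch on
# the result instead of using it directly in a `with` statement.
#
#     managed_browser = create_managed_web_browser(8181)
#     if managed_browser is None:
#         print('Please open http://localhost:8181/ in a browser manually.')
#     else:
#         with managed_browser:
#             ...  # the browser process is managed for the duration of this block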
@contextlib.contextmanager
def managed_webpack_compiler(
config_path=None, use_prod_env=False, use_source_maps=False,
watch_mode=False, max_old_space_size=None):
"""Returns context manager to start/stop the webpack compiler gracefully.
Args:
config_path: str|None. Path to an explicit webpack config, or None to
determine it from the other args.
use_prod_env: bool. Whether to compile for use in production. Only
respected if config_path is None.
use_source_maps: bool. Whether to compile with source maps. Only
respected if config_path is None.
watch_mode: bool. Run the compiler in watch mode, which rebuilds on file
change.
max_old_space_size: int|None. Sets the max memory size of the compiler's
"old memory" section. As memory consumption approaches the limit,
the compiler will spend more time on garbage collection in an effort
to free unused memory.
Yields:
psutil.Process. The Webpack compiler process.
"""
if config_path is not None:
pass
elif use_prod_env:
config_path = (
common.WEBPACK_PROD_SOURCE_MAPS_CONFIG if use_source_maps else
common.WEBPACK_PROD_CONFIG)
else:
config_path = (
common.WEBPACK_DEV_SOURCE_MAPS_CONFIG if use_source_maps else
common.WEBPACK_DEV_CONFIG)
compiler_args = [
common.NODE_BIN_PATH, common.WEBPACK_BIN_PATH, '--config', config_path,
]
if max_old_space_size:
# NOTE: --max-old-space-size is a flag for Node.js, not the Webpack
# compiler, so we insert it immediately after NODE_BIN_PATH.
compiler_args.insert(1, '--max-old-space-size=%d' % max_old_space_size)
if watch_mode:
compiler_args.extend(['--color', '--watch', '--progress'])
with contextlib.ExitStack() as exit_stack:
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc = exit_stack.enter_context(managed_process(
compiler_args, human_readable_name='Webpack Compiler', shell=True,
# Capture compiler's output to detect when builds have completed.
stdout=subprocess.PIPE))
if watch_mode:
for line in iter(lambda: proc.stdout.readline() or None, None):
common.write_stdout_safe(line)
# Message printed when a compilation has succeeded. We break
# after the first one to ensure the site is ready to be visited.
if b'Built at: ' in line:
break
else:
# If none of the lines contained the string 'Built at',
# raise an error because a build hasn't finished successfully.
raise IOError('First build never completed')
def print_proc_output():
"""Prints the proc's output until it is exhausted."""
for line in iter(lambda: proc.stdout.readline() or None, None):
common.write_stdout_safe(line)
# Start a thread to print the rest of the compiler's output to stdout.
printer_thread = threading.Thread(target=print_proc_output)
printer_thread.start()
exit_stack.callback(printer_thread.join)
yield proc
@contextlib.contextmanager
def managed_portserver():
"""Returns context manager to start/stop the portserver gracefully.
The portserver listens at PORTSERVER_SOCKET_FILEPATH and allocates free
ports to clients. This prevents race conditions when two clients request
ports in quick succession. The local Google App Engine server that serves the
development version of Oppia requests ports through python_portpicker, which is
compatible with the portserver this function starts.
By "compatible" we mean that python_portpicker requests a port by sending a
request consisting of the PID of the requesting process and expects a
response consisting of the allocated port number. This is the interface
provided by this portserver.
Yields:
psutil.Popen. The Popen subprocess object.
"""
# TODO(#11549): Move this to top of the file.
if common.PSUTIL_DIR not in sys.path:
# Our unit tests already configure sys.path correctly, but the
# standalone scripts do not. Because of this, the following line cannot
# be covered. This is fine since we want to cleanup this code anyway in
# #11549.
sys.path.insert(1, common.PSUTIL_DIR) # pragma: no cover
import psutil
# Check if a socket file exists. This file can exist when a previous instance
# of the portserver did not close properly. We need to remove it, as otherwise
# the portserver will fail to start.
if os.path.exists(common.PORTSERVER_SOCKET_FILEPATH):
os.remove(common.PORTSERVER_SOCKET_FILEPATH)
portserver_args = [
'python', '-m', 'scripts.run_portserver',
'--portserver_unix_socket_address', common.PORTSERVER_SOCKET_FILEPATH,
]
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc_context = managed_process(
portserver_args, human_readable_name='Portserver', shell=True)
with proc_context as proc:
try:
yield proc
finally:
# Before exiting the proc_context, try to end the process with
# SIGINT. The portserver is configured to shut down cleanly upon
# receiving this signal.
try:
proc.send_signal(signal.SIGINT)
except OSError:
# Raises when the process has already shutdown, in which case we
# can just return immediately.
return # pylint: disable=lost-exception
else:
# Otherwise, give the portserver 10 seconds to shut down after
# sending CTRL-C (SIGINT).
try:
proc.wait(timeout=10)
except psutil.TimeoutExpired:
# If the server fails to shut down, allow proc_context to
# end it by calling terminate() and/or kill().
pass
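# For reference, the request/response exchange described in the docstring looks roughly like
# this from a client's point of view (a sketch only; real callers should go through
# python_portpicker rather than speak the protocol directly):
#
#     import os, socket
#     sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
#     sock.connect(common.PORTSERVER_SOCKET_FILEPATH)
#     sock.sendall(b'%d\n' % os.getpid())  # request: the requesting process's PID
#     port = int(sock.recv(1024))          # response: the allocated port number
#     sock.close()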
@contextlib.contextmanager
def managed_webdriver_server(chrome_version=None):
"""Returns context manager to start/stop the Webdriver server gracefully.
This context manager updates Google Chrome before starting the server.
Args:
chrome_version: str|None. The version of Google Chrome to run the tests
on. If None, then the currently-installed version of Google Chrome
is used instead.
Yields:
psutil.Process. The Webdriver process.
"""
if chrome_version is None:
# Although there are spaces between Google and Chrome in the path, we
# don't need to escape them for Popen (as opposed to on the terminal, in
# which case we would need to escape them for the command to run).
chrome_command = (
'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
if common.is_mac_os() else 'google-chrome')
try:
output = subprocess.check_output([chrome_command, '--version'])
except OSError as e:
# For the error message on macOS, we need to add the backslashes in.
# This is because it is likely that a user will try to run the
# command on their terminal and, as mentioned above, the macOS
# chrome version command has spaces in the path which need to be
# escaped for successful terminal use.
raise Exception(
'Failed to execute "%s --version" command. This is used to '
'determine the chromedriver version to use. Please set the '
'chromedriver version manually using the '
'--chrome_driver_version flag. To determine the '
'chromedriver version to be used, please follow the '
'instructions mentioned in the following URL:\n'
'https://chromedriver.chromium.org/downloads/version-selection'
% chrome_command.replace(' ', r'\ ')) from e
installed_version_parts = b''.join(re.findall(rb'[0-9.]', output))
installed_version = '.'.join(
installed_version_parts.decode('utf-8').split('.')[:-1])
response = utils.url_open(
'https://chromedriver.storage.googleapis.com/LATEST_RELEASE_%s' % (
installed_version))
chrome_version = response.read().decode('utf-8')
print('\n\nCHROME VERSION: %s' % chrome_version)
subprocess.check_call([
common.NODE_BIN_PATH, common.WEBDRIVER_MANAGER_BIN_PATH, 'update',
'--versions.chrome', chrome_version,
])
with contextlib.ExitStack() as exit_stack:
if common.is_windows_os():
# NOTE: webdriver-manager (version 13.0.0) uses `os.arch()` to
# determine the architecture of the operating system, however, this
# function can only be used to determine the architecture of the
# machine that compiled `node`. In the case of Windows, we are using
# the portable version, which was compiled on `ia32` machine so that
# is the value returned by this `os.arch` function. Unfortunately,
# webdriver-manager seems to assume that Windows wouldn't run on the
# ia32 architecture, so its help function used to determine download
# link returns null for this, which means that the application has
# no idea about where to download the correct version.
#
# https://github.com/angular/webdriver-manager/blob/b7539a5a3897a8a76abae7245f0de8175718b142/lib/provider/chromedriver.ts#L16
# https://github.com/angular/webdriver-manager/blob/b7539a5a3897a8a76abae7245f0de8175718b142/lib/provider/geckodriver.ts#L21
# https://github.com/angular/webdriver-manager/blob/b7539a5a3897a8a76abae7245f0de8175718b142/lib/provider/chromedriver.ts#L167
# https://github.com/nodejs/node/issues/17036
regex_pattern = re.escape('this.osArch = os.arch();')
arch = 'x64' if common.is_x64_architecture() else 'x86'
replacement_string = 'this.osArch = "%s";' % arch
exit_stack.enter_context(common.inplace_replace_file_context(
common.CHROME_PROVIDER_FILE_PATH, regex_pattern,
replacement_string))
exit_stack.enter_context(common.inplace_replace_file_context(
common.GECKO_PROVIDER_FILE_PATH, regex_pattern,
replacement_string))
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc = exit_stack.enter_context(managed_process([
common.NODE_BIN_PATH, common.WEBDRIVER_MANAGER_BIN_PATH, 'start',
'--versions.chrome', chrome_version, '--quiet', '--standalone',
], human_readable_name='Webdriver manager', shell=True))
common.wait_for_port_to_be_in_use(4444)
yield proc
@contextlib.contextmanager
def managed_protractor_server(
suite_name='full', dev_mode=True, debug_mode=False,
sharding_instances=1, **kwargs):
"""Returns context manager to start/stop the Protractor server gracefully.
Args:
suite_name: str. The suite name whose tests should be run. If the value
is `full`, all tests will run.
dev_mode: bool. Whether the test is running on dev_mode.
debug_mode: bool. Whether to run the protractor tests in debugging mode.
Read the following instructions to learn how to run e2e tests in
debugging mode:
https://www.protractortest.org/#/debugging#disabled-control-flow.
sharding_instances: int. How many sharding instances to be running.
**kwargs: dict(str: *). Keyword arguments passed to psutil.Popen.
Yields:
psutil.Process. The protractor process.
"""
if sharding_instances <= 0:
raise ValueError('Sharding instance should be larger than 0')
protractor_args = [
common.NODE_BIN_PATH,
# This flag ensures tests fail if the `waitFor()` calls time out.
'--unhandled-rejections=strict',
common.PROTRACTOR_BIN_PATH, common.PROTRACTOR_CONFIG_FILE_PATH,
'--params.devMode=%s' % dev_mode,
'--suite', suite_name,
]
if debug_mode:
# NOTE: This is a flag for Node.js, not Protractor, so we insert it
# immediately after NODE_BIN_PATH.
protractor_args.insert(1, '--inspect-brk')
if sharding_instances > 1:
protractor_args.extend([
'--capabilities.shardTestFiles=True',
'--capabilities.maxInstances=%d' % sharding_instances,
])
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
managed_protractor_proc = managed_process(
protractor_args, human_readable_name='Protractor Server', shell=True,
**kwargs)
with managed_protractor_proc as proc:
yield proc
|
GUI.py
|
import pygame
import threading
import time
import dataGetter
import socket
import random
def calculateColour(fireRisk):
if fireRisk >= 90:
return (195, 0, 0) #dark red
elif fireRisk >= 80:
return (255, 0,0) #red
elif fireRisk >= 70:
return (255, 90, 0) #light red
elif fireRisk >= 60:
return (255, 154, 0) #orange
elif fireRisk >= 40:
return (255, 206, 0) #yellow
elif fireRisk >= 20:
return (240, 255, 0) #light yellow
elif fireRisk >= 0:
return (200, 200, 200) #light grey
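#e.g. calculateColour(85) returns (255, 0, 0) (red)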
def calculateFire(fireIntensity):
if fireIntensity == 1:
return pygame.image.load("smallFlame1.png")
elif fireIntensity == 2:
return pygame.image.load("medFlame1.png")
elif fireIntensity == 3:
return pygame.image.load("largeFlame1.png")
######################
anchor = dataGetter.Tile(ID=[0,0],LAT=50.940,LON=-1.4,HEIGHT=1,windSpeed=1,\
windDirection=1,humidity=1,temperature=1,smoke=1,\
precipitation=1,\
fireRisk=10,hasFire=False,fireIntensity=0)
tile2 = dataGetter.Tile(ID=[0,1],LAT=50.939,LON=-1.4,HEIGHT=2,windSpeed=2,\
windDirection=2,humidity=2,temperature=2,smoke=2,\
precipitation=2,\
fireRisk=20,hasFire=False,fireIntensity=0)
tile3 = dataGetter.Tile(ID=[0,2],LAT=50.938,LON=-1.4,HEIGHT=3,windSpeed=3,\
windDirection=3,humidity=3,temperature=3,smoke=3,\
precipitation=3,\
fireRisk=30,hasFire=True,fireIntensity=1)
tile4 = dataGetter.Tile(ID=[1,0],LAT=50.940,LON=-1.399,HEIGHT=4,windSpeed=4,\
windDirection=1,humidity=4,temperature=4,smoke=4,\
precipitation=4,\
fireRisk=40,hasFire=False,fireIntensity=0)
tile5 = dataGetter.Tile(ID=[1,1],LAT=50.939,LON=-1.399,HEIGHT=5,windSpeed=5,\
windDirection=5,humidity=5,temperature=5,smoke=5,\
precipitation=5,\
fireRisk=50,hasFire=False,fireIntensity=0)
tile6 = dataGetter.Tile(ID=[1,2],LAT=50.938,LON=-1.399,HEIGHT=6,windSpeed=6,\
windDirection=6,humidity=6,temperature=6,smoke=6,\
precipitation=6,\
fireRisk=60,hasFire=False,fireIntensity=0)
tile7 = dataGetter.Tile(ID=[2,0],LAT=50.940,LON=-1.398,HEIGHT=7,windSpeed=7,\
windDirection=7,humidity=7,temperature=7,smoke=7,\
precipitation=7,\
fireRisk=70,hasFire=False,fireIntensity=0)
tile8 = dataGetter.Tile(ID=[2,1],LAT=50.939,LON=-1.398,HEIGHT=8, windSpeed=8,\
windDirection=8,humidity=8,temperature=8,smoke=8,\
precipitation=8,\
fireRisk=80,hasFire=True,fireIntensity=3)
tile9 = dataGetter.Tile(ID=[2,2],LAT=50.938,LON=-1.398,HEIGHT=9, windSpeed=9,\
windDirection=9,humidity=9,temperature=9,smoke=9,\
precipitation=9,\
fireRisk=90,hasFire=False,fireIntensity=0)
tile10 = dataGetter.Tile(ID=[3,0],LAT=50.938,LON=-1.399,HEIGHT=6,windSpeed=6,\
windDirection=6,humidity=6,temperature=6,smoke=6,\
precipitation=6,\
fireRisk=60,hasFire=False,fireIntensity=0)
tile11 = dataGetter.Tile(ID=[3,1],LAT=50.940,LON=-1.398,HEIGHT=7,windSpeed=7,\
windDirection=7,humidity=7,temperature=7,smoke=7,\
precipitation=7,\
fireRisk=70,hasFire=False,fireIntensity=0)
tile12 = dataGetter.Tile(ID=[3,2],LAT=50.939,LON=-1.398,HEIGHT=8, windSpeed=8,\
windDirection=8,humidity=8,temperature=8,smoke=8,\
precipitation=8,\
fireRisk=80,hasFire=False,fireIntensity=0)
tile13 = dataGetter.Tile(ID=[4,0],LAT=50.938,LON=-1.398,HEIGHT=9, windSpeed=9,\
windDirection=9,humidity=9,temperature=9,smoke=9,\
precipitation=9,\
fireRisk=90,hasFire=False,fireIntensity=0)
tile14 = dataGetter.Tile(ID=[4,1],LAT=50.938,LON=-1.399,HEIGHT=6,windSpeed=6,\
windDirection=6,humidity=6,temperature=6,smoke=6,\
precipitation=6,\
fireRisk=60,hasFire=False,fireIntensity=0)
tile15 = dataGetter.Tile(ID=[4,2],LAT=50.940,LON=-1.398,HEIGHT=7,windSpeed=7,\
windDirection=7,humidity=7,temperature=7,smoke=7,\
precipitation=7,\
fireRisk=70,hasFire=False,fireIntensity=0)
tile16 = dataGetter.Tile(ID=[5,0],LAT=50.939,LON=-1.398,HEIGHT=8, windSpeed=8,\
windDirection=8,humidity=8,temperature=8,smoke=8,\
precipitation=8,\
fireRisk=80,hasFire=False,fireIntensity=0)
tile17 = dataGetter.Tile(ID=[5,1],LAT=50.938,LON=-1.398,HEIGHT=9, windSpeed=9,\
windDirection=9,humidity=9,temperature=9,smoke=9,\
precipitation=9,\
fireRisk=90,hasFire=False,fireIntensity=0)
tile18 = dataGetter.Tile(ID=[5,2],LAT=50.938,LON=-1.398,HEIGHT=9, windSpeed=9,\
windDirection=9,humidity=9,temperature=9,smoke=9,\
precipitation=9,\
fireRisk=90,hasFire=False,fireIntensity=0)
tilemap = dataGetter.TileMap(anchor=anchor,step=1,length=15,width=15)
#fakeTiles = [anchor, tile2,tile3, tile4, tile5, tile6, tile7, tile8, tile9,\
#tile10,tile11, tile12, tile13, tile14, tile15, tile16, tile17,\
#tile18]
fakeTiles = []
for i in range(15):
for j in range(15):
if i == 0 and j == 0:
fakeTiles.append(dataGetter.Tile(ID=[i,j],LAT=50,LON=-1,HEIGHT=random.randint(8,12), windSpeed=9,\
windDirection=random.randint(6,12),humidity=random.randint(0,100),temperature=random.randint(5,20),smoke=random.randint(5,12),\
precipitation=random.randint(0,6),\
fireRisk=100,hasFire=False,fireIntensity=0))
else:
fakeTiles.append(dataGetter.Tile(ID=[i,j],LAT=50.938,LON=-1.398,HEIGHT=random.randint(8,12), windSpeed=9,\
windDirection=random.randint(6,12),humidity=random.randint(0,100),temperature=random.randint(5,20),smoke=random.randint(5,12),\
precipitation=random.randint(0,6),\
fireRisk=random.randint(0,100),hasFire=False,fireIntensity=0))
"""if random.randint(0,7) == 0:
fakeTiles[-1].hasFire = True
fakeTiles[-1].fireIntensity = 3"""
#fakeTiles[0].fireRisk = 100
tilemap.setFakeData(fakeTiles)
def getAPIData():
global tilemap
print("hi")
while True:
for i in range(tilemap.length):
for j in range(tilemap.width):
tilemap.tilemap[i][j].updateAPIData()
time.sleep(3)
thread1 = threading.Thread(target = getAPIData)
thread1.start()
def seperateData(data):
data += ","
newData = []
currentString = ""
for i in data:
if i != ",":
currentString += i
else:
newData.append(currentString)
currentString = ""
return newData
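#e.g. seperateData("49.5,21.0,3.2,1013.0,0,1") returns ['49.5', '21.0', '3.2', '1013.0', '0', '1']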
def getSensorData():
global tilemap
host = ""
port = 7777
c = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
my_ip = socket.gethostbyname(socket.gethostname())
print(my_ip)
c.bind(('',25565))
c.listen(1)
while True:
client,addr = c.accept()
print(addr)
i=0
while True:
data = client.recv(1024)
if not data:
break
data = seperateData(data.decode('utf-8'))
print("data",data)
x = int(data[4])
y = int(data[5])
#tilemap.tilemap[x][y].setHumidityTrend(float(data[0]))
tilemap.tilemap[x][y].setHumidity(float(data[0]))
#tilemap.tilemap[x][y].setTemperatureTrend(float(data[2]))
tilemap.tilemap[x][y].setTemperature(float(data[1]))
#tilemap.tilemap[x][y].setSmokeTrend(float(data[4]))
tilemap.tilemap[x][y].setSmoke(float(data[2]))
tilemap.tilemap[x][y].setPressure(float(data[3]))
#print("smoke",tilemap.tilemap[x][y].smokeTrend)
#print("temp",tilemap.tilemap[x][y].temperatureTrend)
"""
for i in range(tilemap.length):
for j in range(tilemap.width):
tilemap.tilemap[i][j].detect_fire()
"""
tilemap.tilemap[0][0].detect_fire()
#if i%5 == 0:
tilemap.setSimulatorTiles()
tilemap.simulate()
#i=0
#i += 1
thread2 = threading.Thread(target = getSensorData)
thread2.start()
##############################
pygame.init()
pygame.mixer.init() #initialises pygame
pygame.font.init()
fps = 60 #the game's frames per second
all_fonts = pygame.font.get_fonts()
myfont = pygame.font.SysFont(all_fonts[7], 30)
infoObject = pygame.display.Info()
screen = pygame.display.set_mode((infoObject.current_w, infoObject.current_h),pygame.FULLSCREEN)
pygame.display.set_caption("FAFF")
clock = pygame.time.Clock()
green = (0,255,0)
blue = (0,0,255)
black = (0,0,0)
grey = (128,128,128)
red = (255,0,0)
darkgreen = (34,139,34)
fontNumber = 0
#########################
class button(): #class to quickly make buttons
def __init__(self,colour, x,y,width,height, text='',active_colour = (0,0,255,0.5),fontSize = 45):
self.current_colour = colour
self.colour = colour #button colour
self.active_colour = active_colour #colour of the button while the mouse hovers over it.
self.x = x #x coordinate of top left corner of the button
self.y = y #y coordinate of top left corner of the button
self.width = width #button width
self.height = height #button height
self.text = text #button text
self.fontSize = fontSize
#these are the different button options.
#these options allow many different buttons to be created from this class
def draw(self,screen,outline=None,show_active = False): #method to draw the button
if outline: #decides if the button has an outline.
pygame.draw.rect(screen, outline, (self.x-2,self.y-2,self.width+4,self.height+4),0) #draws the button outline.
#the outline is a black box which is slightly bigger than the button. This appears as an outline
if show_active:
self.current_colour = self.active_colour
pygame.draw.rect(screen,self.current_colour, (self.x,self.y,self.width,self.height),0)
#draws the button
if self.text != "": #only adds text if there is text to add
font = pygame.font.SysFont(all_fonts[fontNumber], self.fontSize) #defines the font used.
text = font.render(self.text, 1, (0,0,0)) #renders the text
screen.blit(text, (self.x + (self.width/2 - text.get_width()/2), self.y + (self.height/2 - text.get_height()/2)))
#puts the text in the center of the button.
if show_active:
self.current_colour = self.colour
def clicked(self, pos):
if pos[0] > self.x and pos[0] < self.x + self.width:
if pos[1] > self.y and pos[1] < self.y + self.height:
return True
return False #A method to check if the mouse is over the button.
#This is run when the user presses the mouse button.
def hover(self): #makes the button change colour when the mouse is hovered over it.
if self.clicked(pygame.mouse.get_pos()):
self.current_colour = self.active_colour
return True
else:
self.current_colour = self.colour
return False
def press(self):#checks if the mouse button is pressed.
if self.clicked(pygame.mouse.get_pos()) and event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
return True
return False
def setColour(self,colour):
self.colour = colour
running = True
############## surfaces
"""
mapSection = pygame.Surface((infoObject.current_w / 2, infoObject.current_h)) ##left side
mapToggleSection = pygame.Surface((mapSection.get_size()[0], mapSection.get_size()[1] / 8))
mapViewSection = pygame.Surface((mapSection.get_size()[0], mapSection.get_size()[1] - mapToggleSection.get_size()[1]))
tableSection = pygame.Surface((infoObject.current_w / 2, infoObject.current_h)) #right side
pageToggleSection = pygame.Surface((infoObject.current_w / 2, infoObject.current_h / 8))
tableContentSection = pygame.Surface((infoObject.current_w / 2, infoObject.current_h / 8))
tableTitleSection = pygame.Surface((infoObject.current_w / 2, infoObject.current_h / 8))
mapSurface = pygame.Surface((infoObject.current_w / 2, infoObject.current_h / 8))
"""
##############import images
"""
smallFlame = pygame.image.load("smallFlame1.png")
smallFlameScaled = pygame.transform.scale(smallFlame, (100, 100))
medFlame = pygame.image.load("medFlame1.png")
medFlameScaled = pygame.transform.scale(medFlame, (100, 100))
largeFlame = pygame.image.load("largeFlame1.png")
largeFlameScaled = pygame.transform.scale(largeFlame, (100, 100))
"""
############## defining buttons
quitButton = button(red,1628,108,100,100,"Quit")
fireButton = button(grey,192,108,384,100,"Fire")
riskButton = button((0,90,235),576,108,384,100,"Risk")
returnButton = button(grey,960,873,768,100,"Back")
############# text
font = pygame.font.SysFont(all_fonts[fontNumber], 70)
text = font.render("Alert Rankings", 1, (255,255,255))
fontHeading = pygame.font.SysFont(all_fonts[fontNumber], 25)
textHeadingLon = fontHeading.render("Longitude", 1, (255,255,255))
textHeadingLat = fontHeading.render("Latitude", 1, (255,255,255))
textHeadingStat = fontHeading.render("Status", 1, (255,255,255))
textHeadingGoTo = fontHeading.render("Show on Map", 1, (255,255,255))
textHeadingInfo = fontHeading.render("More Info", 1, (255,255,255))
#############
mode = "home"
mapType = "risk"
zoom = 12
mapX = 768
mapY = 864
offsetX = 0
offsetY = 0
rankingOffset = 0
showButtons = []
infoButtons = []
heldCoords = [0,0]
flashingTile = None
waitTime = 0
for i in range(15):
showButtons.append(button(grey,960+(153.6*3),2+208+(i+1)*51.2,154,51.2,"Show On Map",fontSize = 25))
infoButtons.append(button(grey,960+(153.6*4),2+208+(i+1)*51.2,154,51.2,"More Info",fontSize = 25))
extraRect = False
flashingTimer = 0
while running:
######
######
clock.tick(fps)
screen.fill((0,0,0))
if waitTime > 0:
waitTime -= 1
if flashingTimer > 0:
flashingTimer -= 1
if (flashingTimer > 10 and flashingTimer < 20) or \
(flashingTimer > 30 and flashingTimer < 40) or \
(flashingTimer > 50 and flashingTimer < 60):
extraRect = True
else:
extraRect = False
#mapToggleSection.fill((215,255,255))
if mode == "home":
ranking = tilemap.getFireAlertRanking()
rankingView = []
if len(ranking) < 15:
rankingViewLength = len(ranking)
else:
rankingViewLength = 15
for i in range(rankingViewLength):
rankingView.append(ranking[i+rankingOffset])
mousePos = pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
if mousePos[0] < 864:
if event.button == 4 and zoom < 21:
zoom += 1
elif event.button == 5 and zoom > 1:
zoom -= 1
elif mousePos[0] >= 864:
if event.button == 5 and rankingOffset + 15 < len(ranking):
rankingOffset +=1
elif event.button == 4 and rankingOffset > 1:
rankingOffset -=1
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
offsetX += 1
elif event.key == pygame.K_LEFT:
offsetX -= 1
elif event.key == pygame.K_UP:
offsetY -= 1
elif event.key == pygame.K_DOWN:
offsetY += 1
#riskTableSection
#screen.blit(mapToggleSection,(0,0))
quitButton.draw(screen)
quitButton.hover()
fireButton.draw(screen,(255,255,255))
fireButton.hover()
riskButton.draw(screen,(255,255,255))
riskButton.hover()
if mapType == "risk" and fireButton.press():
mapType = "fire"
fireButton.setColour((0,90,235))
riskButton.setColour(grey)
elif mapType == "fire" and riskButton.press():
mapType = "risk"
fireButton.setColour(grey)
riskButton.setColour((0,90,235))
if mode == "home":
screen.blit(text, (1020,120))
screen.blit(textHeadingLon, (970,220))
screen.blit(textHeadingLat, (1124,220))
screen.blit(textHeadingStat, (1278,220))
screen.blit(textHeadingGoTo, (1432,220))
screen.blit(textHeadingInfo, (1586,220))
fontHeading = pygame.font.SysFont(all_fonts[fontNumber], 20)
for index ,tile in enumerate(rankingView):
textLon = fontHeading.render(str(tile.LON), 1, (255,255,255))
screen.blit(textLon, (970,280+(index)*51.2))
textLat = fontHeading.render(str(tile.LAT), 1, (255,255,255))
screen.blit(textLat, (1124,280+(index)*51.2))
textStat= fontHeading.render(str(tile.status), 1, (255,255,255))
screen.blit(textStat, (1278,280+(index)*51.2))
for i in range(rankingViewLength):
showButtons[i].hover()
showButtons[i].draw(screen)
infoButtons[i].hover()
infoButtons[i].draw(screen)
if infoButtons[i].press() and waitTime == 0:
waitTime = 15
mode = "info"
heldCoords = rankingView[i].ID
if showButtons[i].press() and waitTime == 0:
offsetX = - int(zoom / 2) + rankingView[i].ID[0]
offsetY = - int(zoom / 2) + rankingView[i].ID[1]
flashingTile = rankingView[i]
flashingTimer = 60
########## draw table
for i in range(5 + 1):
for j in range(15 + 1):
#hor
pygame.draw.rect(screen,(255,0 ,0),(960,208 + (j * 768) / 15 + 2,768, 3))
#ver
pygame.draw.rect(screen,(255,0,0),(960 + (i * mapX) / 5, 210 ,1 ,864))
#
###########
elif mode == "info":
#print(heldCoords[0],heldCoords[1])
#print(tilemap.tilemap[0][0].smoke)
returnButton.hover()
returnButton.draw(screen)
font = pygame.font.SysFont(all_fonts[fontNumber], 60)
tileInfoHeading = font.render("LON: "+str(tilemap.tilemap[heldCoords[0]][heldCoords[1]].LON)+" LAT: "+str(tilemap.tilemap[heldCoords[0]][heldCoords[1]].LAT), 1, (255,255,255))
screen.blit(tileInfoHeading, (970,120))
fontHeading = pygame.font.SysFont(all_fonts[fontNumber], 40)
textHeight = fontHeading.render("Height: " + str(tilemap.tilemap[heldCoords[0]][heldCoords[1]].HEIGHT) + " m", 1, (255,255,255))
screen.blit(textHeight, (970,280))
textWindSpeed = fontHeading.render("Wind Speed: " + str(tilemap.tilemap[heldCoords[0]][heldCoords[1]].windSpeed) + " km/h", 1, (255,255,255))
screen.blit(textWindSpeed, (970,331))
textWindSpeed = fontHeading.render("Wind Direction: " + str(tilemap.tilemap[heldCoords[0]][heldCoords[1]].windDirection) + " degrees", 1, (255,255,255))
screen.blit(textWindSpeed, (970,382))
textPrecipitation = fontHeading.render("Precipitation: " + str(tilemap.tilemap[heldCoords[0]][heldCoords[1]].precipitation) + " mm", 1, (255,255,255))
screen.blit(textPrecipitation, (970,433))
textHumidity = fontHeading.render("Humidity: " + str(tilemap.tilemap[heldCoords[0]][heldCoords[1]].humidity) + " %", 1, (255,255,255))
screen.blit(textHumidity, (970,484))
textTemperature = fontHeading.render("Temperature: " + str(tilemap.tilemap[heldCoords[0]][heldCoords[1]].temperature) + " degrees centigrade", 1, (255,255,255))
screen.blit(textTemperature, (970,536))
textSmoke = fontHeading.render("Smoke: " + str(tilemap.tilemap[heldCoords[0]][heldCoords[1]].smoke) + " mg/m³", 1, (255,255,255))
screen.blit(textSmoke, (970,587))
textPressure = fontHeading.render("Pressure: " + str(tilemap.tilemap[heldCoords[0]][heldCoords[1]].pressure) + " hPa", 1, (255,255,255))
screen.blit(textPressure, (970,638))
textFireRisk = fontHeading.render("Fire Risk: " + str(tilemap.tilemap[heldCoords[0]][heldCoords[1]].fireRisk) + " %", 1, (255,255,255))
screen.blit(textFireRisk, (970,689))
textHasFire = fontHeading.render("Fire: " + str(tilemap.tilemap[heldCoords[0]][heldCoords[1]].hasFire), 1, (255,255,255))
screen.blit(textHasFire, (970,740))
textFireIntensity = fontHeading.render("Fire Intensity: " + str(tilemap.tilemap[heldCoords[0]][heldCoords[1]].fireIntensity), 1, (255,255,255))
screen.blit(textFireIntensity, (970,792))
if returnButton.press() and waitTime == 0:
mode = "home"
waitTime = 15
########## drawing map
for i in range(zoom):
for j in range(zoom):
#hor
pygame.draw.rect(screen,(0,255,0),(188,208 + (j * mapY) / zoom + 2,768,3))
#ver
pygame.draw.rect(screen,(0,255,0),(188 + (i * mapX) / zoom,210,3,864))
#
if i + offsetX < tilemap.length and j +offsetY < tilemap.width and i + offsetX > -1 and j + offsetY > -1:
if mapType == "risk":
pygame.draw.rect(screen,calculateColour(tilemap.tilemap[i + offsetX][j + offsetY].getFireRisk()),(188 + (i * mapX) / zoom, 208+(j*mapY)/(zoom)+3, mapX/zoom, mapY/zoom))
elif mapType == "fire":
"""
smallFlame = pygame.image.load("smallFlame1.png")
smallFlameScaled = pygame.transform.scale(smallFlame, (100, 100))
medFlame = pygame.image.load("medFlame1.png")
medFlameScaled = pygame.transform.scale(medFlame, (100, 100))
largeFlame = pygame.image.load("largeFlame1.png")
largeFlameScaled = pygame.transform.scale(largeFlame, (100, 100))
"""
#print(tilemap.tilemap[i + offsetX][j + offsetY].getFireSpreadRisk())
#rint(tilemap.tilemap[i + offsetX][j + offsetY].getFireSpreadRisk() * 100)
if tilemap.tilemap[i + offsetX][j + offsetY].getFireSpreadRisk() * 100 > 20:
pygame.draw.rect(screen,calculateColour(tilemap.tilemap[i + offsetX][j + offsetY].getFireSpreadRisk() * 100),(188 + (i * mapX) / zoom, 208+(j*mapY)/(zoom)+3, mapX/zoom, mapY/zoom))
if tilemap.tilemap[i + offsetX][j + offsetY].getHasFire():
image = calculateFire(tilemap.tilemap[i + offsetX][j + offsetY].getFireIntensity())
image = pygame.transform.scale(image, ( int(mapX/zoom), int(mapY/zoom)))#.set_colorkey((255,255,255))
#image.set_colorkey((255,255,255))
screen.blit(image, ([int(188 + (i * mapX) / zoom), int( 208+(j*mapY)/(zoom)+3)]))
if extraRect:
if flashingTile.ID[0] == i + offsetX and flashingTile.ID[1] == j + offsetY:
pygame.draw.rect(screen,(255,255,255),(188 + (i * mapX) / zoom, 208+(j*mapY)/(zoom)+3, mapX/zoom, mapY/zoom))
########### draw borders
pygame.draw.rect(screen,(255,255,255),(960 - 4, 108, 6, 864)) #middle
pygame.draw.rect(screen,(255,255,255),(192, 208, 1536, 6)) #middle row
pygame.draw.rect(screen,(255,255,255),(192, 108, 1536, 6)) #top
pygame.draw.rect(screen,(255,255,255),(192, 108, 6, 864)) #left
pygame.draw.rect(screen,(255,255,255),(1728 - 6, 108, 6, 864)) #right
pygame.draw.rect(screen,(255,255,255),(192, 972 - 6, 1536, 6)) #bottom
pygame.display.flip()
if quitButton.press():
pygame.quit()
running = False
thread1.join()
thread2.join()
|
HiwinRA605_socket_ros_test_20190625191140.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receive commands from the strategy side and forward them to the control-side PC over a socket
import socket
## multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
data = '0' # initial value of the transmitted data
Arm_feedback = 0 # assume the arm starts in the Ready state
state_feedback = 0
NAME = 'socket_server'
client_response = 0 # initial count of client responses
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # a bare return ends the generator; raising StopIteration here is a RuntimeError under PEP 479
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
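##-----------switch usage sketch------------##
# A minimal usage sketch for the switch/case emulation above (value, 1, 2, 3 are placeholders):
#
# for case in switch(value):
#     if case(1):
#         ...            # runs when value == 1
#         break
#     if case(2, 3):
#         ...            # runs when value is 2 or 3
#         break
#     if case():         # no arguments: default branch
#         ...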
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server side-------
def point_data(req): ## receive pose data sent from the strategy side
global client_response
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
client_response = client_response + 1
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req): ## receive arm-mode data sent from the strategy side
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req): ## receive speed-mode data sent from the strategy side
socket_cmd.Speedmode = int('%s'%req.Speedmode)
return(1)
# def Grip_Mode(req): ## receive gripper action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server(): ## create the server node
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
rospy.spin() ## spin one
##------------server side end-------
##----------socket packet transmission--------------##
##-----------socket client--------
def socket_client():
global Arm_feedback,data
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(s.recv(1024))
#start_input=int(input('Press 1 to start transmission, 3 to exit: ')) # enter the start command
start_input = 1
if start_input==1:
while(Arm_feedback == 0):
##---------------send arm commands over the socket-----------------
#-------select mode--------
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
#-------set arm speed--------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
#-------set arm delay time--------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
#-------set arm rapid/safe speed mode--------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
socket_cmd.action= 5 ## switch back to the initial mode state
s.send(data.encode('utf-8')) # send the command string over the socket
feedback_str = s.recv(1024)
# the arm side reports its state
if str(feedback_str[2]) == '70':# 'F': the arm is Ready and can accept the next motion command
Arm_feedback = 0
socket_client_arm_state(Arm_feedback)
#print("isbusy false")
if str(feedback_str[2]) == '84':# 'T': the arm is busy and cannot execute the next motion command
Arm_feedback = 1
socket_client_arm_state(Arm_feedback)
#print("isbusy true")
if str(feedback_str[2]) == '54':# '6': the strategy has finished
Arm_feedback = 6
socket_client_arm_state(Arm_feedback)
print("shutdown")
##---------------send arm commands over the socket end-----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
s.close()
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 5 ## switch to the initial mode state
t = threading.Thread(target=thread_test)
t.start() # start the thread
socket_server()
t.join()
# Ctrl+K Ctrl+C  Add line comment
# Ctrl+K Ctrl+U  Remove line comment
# Ctrl+] / Ctrl+[  Indent / outdent line
|
doom_multiagent_wrapper.py
|
import threading
import time
from enum import Enum
from multiprocessing import Process
from queue import Empty, Queue
import faster_fifo
import cv2
import filelock
import gym
from filelock import FileLock
from envs.doom.doom_gym import doom_lock_file
from envs.doom.doom_render import concat_grid, cvt_doom_obs
from envs.doom.multiplayer.doom_multiagent import find_available_port, DEFAULT_UDP_PORT
from envs.env_utils import RewardShapingInterface, get_default_reward_shaping
from utils.utils import log
from functools import wraps
from time import sleep
def retry_dm(exception_class=Exception, num_attempts=3, sleep_time=1, should_reset=False):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
for i in range(num_attempts):
try:
return func(*args, **kwargs)
except exception_class as e:
# This accesses the self instance variable
multiagent_wrapper_obj = args[0]
multiagent_wrapper_obj.initialized = False
multiagent_wrapper_obj.close()
# This is done to reset if it is in the step function
if should_reset:
multiagent_wrapper_obj.reset()
if i == num_attempts - 1:
raise
else:
log.error('Failed with error %r, trying again', e)
sleep(sleep_time)
return wrapper
return decorator
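# Note: retry_dm is applied to MultiAgentEnv.info()/reset()/step() further below; step() passes
# should_reset=True so that a crashed underlying env is closed and re-created before the retry.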
def safe_get(q, timeout=1e6, msg='Queue timeout'):
"""Using queue.get() with timeout is necessary, otherwise KeyboardInterrupt is not handled."""
while True:
try:
return q.get(timeout=timeout)
except Empty:
log.warning(msg)
def udp_port_num(env_config):
if env_config is None:
return DEFAULT_UDP_PORT
port_to_use = DEFAULT_UDP_PORT + 100 * env_config.worker_index + env_config.vector_index
return port_to_use
class TaskType(Enum):
INIT, TERMINATE, RESET, STEP, STEP_UPDATE, INFO, SET_ATTR = range(7)
def init_multiplayer_env(make_env_func, player_id, env_config, init_info=None):
env = make_env_func(player_id=player_id)
if env_config is not None and 'worker_index' in env_config:
env.unwrapped.worker_index = env_config.worker_index
if env_config is not None and 'vector_index' in env_config:
env.unwrapped.vector_index = env_config.vector_index
if init_info is None:
port_to_use = udp_port_num(env_config)
port = find_available_port(port_to_use, increment=1000)
log.debug('Using port %d', port)
init_info = dict(port=port)
env.unwrapped.init_info = init_info
env.seed(env.unwrapped.worker_index * 1000 + env.unwrapped.vector_index * 10 + player_id)
return env
class MultiAgentEnvWorker:
def __init__(self, player_id, make_env_func, env_config, use_multiprocessing=False, reset_on_init=True):
self.player_id = player_id
self.make_env_func = make_env_func
self.env_config = env_config
self.reset_on_init = reset_on_init
if use_multiprocessing:
self.process = Process(target=self.start, daemon=False)
self.task_queue, self.result_queue = faster_fifo.Queue(), faster_fifo.Queue()
else:
self.process = threading.Thread(target=self.start)
self.task_queue, self.result_queue = Queue(), Queue()
self.process.start()
def _init(self, init_info):
log.info('Initializing env for player %d, init_info: %r...', self.player_id, init_info)
env = init_multiplayer_env(self.make_env_func, self.player_id, self.env_config, init_info)
if self.reset_on_init:
env.reset()
return env
@staticmethod
def _terminate(env):
if env is None:
return
env.close()
@staticmethod
def _get_info(env):
"""Specific to custom VizDoom environments."""
info = {}
if hasattr(env.unwrapped, 'get_info_all'):
info = env.unwrapped.get_info_all() # info for the new episode
return info
def _set_env_attr(self, env, player_id, attr_chain, value):
"""Allows us to set an arbitrary attribute of the environment, e.g. attr_chain can be unwrapped.foo.bar"""
assert player_id == self.player_id
attrs = attr_chain.split('.')
curr_attr = env
try:
for attr_name in attrs[:-1]:
curr_attr = getattr(curr_attr, attr_name)
except AttributeError:
log.error('Env does not have an attribute %s', attr_chain)
attr_to_set = attrs[-1]
setattr(curr_attr, attr_to_set, value)
def start(self):
env = None
while True:
data, task_type = safe_get(self.task_queue)
if task_type == TaskType.INIT:
env = self._init(data)
self.result_queue.put(None) # signal we're done
continue
if task_type == TaskType.TERMINATE:
self._terminate(env)
break
results = None
if task_type == TaskType.RESET:
results = env.reset()
elif task_type == TaskType.INFO:
results = self._get_info(env)
elif task_type == TaskType.STEP or task_type == TaskType.STEP_UPDATE:
# collect obs, reward, done, and info
action = data
env.unwrapped.update_state = task_type == TaskType.STEP_UPDATE
results = env.step(action)
elif task_type == TaskType.SET_ATTR:
player_id, attr_chain, value = data
self._set_env_attr(env, player_id, attr_chain, value)
else:
raise Exception(f'Unknown task type {task_type}')
self.result_queue.put(results)
class MultiAgentEnv(gym.Env, RewardShapingInterface):
def __init__(self, num_agents, make_env_func, env_config, skip_frames):
gym.Env.__init__(self)
RewardShapingInterface.__init__(self)
self.num_agents = num_agents
log.debug('Multi agent env, num agents: %d', self.num_agents)
self.skip_frames = skip_frames # number of frames to skip (1 = no skip)
env = make_env_func(player_id=-1) # temporary env just to query observation_space and stuff
self.action_space = env.action_space
self.observation_space = env.observation_space
self.default_reward_shaping = get_default_reward_shaping(env)
env.close()
self.current_reward_shaping = [self.default_reward_shaping for _ in range(self.num_agents)]
self.make_env_func = make_env_func
self.safe_init = env_config is not None and env_config.get('safe_init', False)
if self.safe_init:
sleep_seconds = env_config.worker_index * 1.0
log.info('Sleeping %.3f seconds to avoid creating all envs at once', sleep_seconds)
time.sleep(sleep_seconds)
log.info('Done sleeping at %d', env_config.worker_index)
self.env_config = env_config
self.workers = None
# only needed when rendering
self.enable_rendering = False
self.last_obs = None
self.reset_on_init = True
self.initialized = False
def get_default_reward_shaping(self):
return self.default_reward_shaping
def get_current_reward_shaping(self, agent_idx: int):
return self.current_reward_shaping[agent_idx]
def set_reward_shaping(self, reward_shaping: dict, agent_idx: int):
self.current_reward_shaping[agent_idx] = reward_shaping
self.set_env_attr(
agent_idx, 'unwrapped.reward_shaping_interface.reward_shaping_scheme', reward_shaping,
)
def await_tasks(self, data, task_type, timeout=None):
"""
Task result is always a tuple of lists, e.g.:
(
[0th_agent_obs, 1st_agent_obs, ... ],
[0th_agent_reward, 1st_agent_reward, ... ],
...
)
If your "task" returns only one result per agent (e.g. reset() returns only the observation),
        the result will be a tuple of length 1. It is the responsibility of the caller to index appropriately.
"""
if data is None:
data = [None] * self.num_agents
assert len(data) == self.num_agents
for i, worker in enumerate(self.workers):
worker.task_queue.put((data[i], task_type))
result_lists = None
for i, worker in enumerate(self.workers):
results = safe_get(
worker.result_queue,
timeout=0.2 if timeout is None else timeout,
msg=f'Takes a surprisingly long time to process task {task_type}, retry...',
)
if not isinstance(results, (tuple, list)):
results = [results]
if result_lists is None:
result_lists = tuple([] for _ in results)
for j, r in enumerate(results):
result_lists[j].append(r)
return result_lists
def _ensure_initialized(self):
if self.initialized:
return
self.workers = [
MultiAgentEnvWorker(i, self.make_env_func, self.env_config, reset_on_init=self.reset_on_init)
for i in range(self.num_agents)
]
init_attempt = 0
while True:
init_attempt += 1
try:
port_to_use = udp_port_num(self.env_config)
port = find_available_port(port_to_use, increment=1000)
log.debug('Using port %d', port)
init_info = dict(port=port)
lock_file = doom_lock_file(max_parallel=20)
lock = FileLock(lock_file)
with lock.acquire(timeout=10):
for i, worker in enumerate(self.workers):
worker.task_queue.put((init_info, TaskType.INIT))
if self.safe_init:
time.sleep(1.0) # just in case
else:
time.sleep(0.05)
for i, worker in enumerate(self.workers):
worker.result_queue.get(timeout=20)
except filelock.Timeout:
continue
except Exception:
raise RuntimeError('Critical error: worker stuck on initialization. Abort!')
else:
break
log.debug('%d agent workers initialized for env %d!', len(self.workers), self.env_config.worker_index)
self.initialized = True
@retry_dm(exception_class=Exception, num_attempts=3, sleep_time=1, should_reset=False)
def info(self):
self._ensure_initialized()
info = self.await_tasks(None, TaskType.INFO)[0]
return info
@retry_dm(exception_class=Exception, num_attempts=3, sleep_time=1, should_reset=False)
def reset(self):
self._ensure_initialized()
observation = self.await_tasks(None, TaskType.RESET, timeout=2.0)[0]
return observation
@retry_dm(exception_class=Exception, num_attempts=3, sleep_time=1, should_reset=True)
def step(self, actions):
self._ensure_initialized()
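        # Frameskip: repeat the same action with STEP for (skip_frames - 1) frames, then send a
        # final STEP_UPDATE, which the worker flags via env.unwrapped.update_state (see start()).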
for frame in range(self.skip_frames - 1):
self.await_tasks(actions, TaskType.STEP)
obs, rew, dones, infos = self.await_tasks(actions, TaskType.STEP_UPDATE)
for info in infos:
info['num_frames'] = self.skip_frames
if all(dones):
obs = self.await_tasks(None, TaskType.RESET, timeout=2.0)[0]
if self.enable_rendering:
self.last_obs = obs
return obs, rew, dones, infos
# noinspection PyUnusedLocal
def render(self, *args, **kwargs):
self.enable_rendering = True
if self.last_obs is None:
return
render_multiagent = True
if render_multiagent:
obs_display = [o['obs'] for o in self.last_obs]
obs_grid = concat_grid(obs_display)
cv2.imshow('vizdoom', obs_grid)
else:
obs_display = self.last_obs[0]['obs']
cv2.imshow('vizdoom', cvt_doom_obs(obs_display))
cv2.waitKey(1)
def close(self):
if self.workers is not None:
# log.info('Stopping multiagent env %d...', self.env_config.worker_index)
for worker in self.workers:
worker.task_queue.put((None, TaskType.TERMINATE))
time.sleep(0.1)
for worker in self.workers:
worker.process.join()
def seed(self, seed=None):
"""Does not really make sense for the wrapper. Individual envs will be uniquely seeded on init."""
pass
def set_env_attr(self, agent_idx, attr_chain, value):
data = (agent_idx, attr_chain, value)
worker = self.workers[agent_idx]
worker.task_queue.put((data, TaskType.SET_ATTR))
result = safe_get(worker.result_queue, timeout=0.1)
assert result is None
|
test_socketserver.py
|
"""
Test suite for socketserver.
"""
import contextlib
import io
import os
import select
import signal
import socket
import tempfile
import threading
import unittest
import socketserver
import test.support
from test.support import reap_children, reap_threads, verbose, os_helper
test.support.requires("network")
TEST_STR = b"hello world\n"
HOST = test.support.HOST
HAVE_UNIX_SOCKETS = hasattr(socket, "AF_UNIX")
requires_unix_sockets = unittest.skipUnless(HAVE_UNIX_SOCKETS,
'requires Unix sockets')
HAVE_FORKING = hasattr(os, "fork")
requires_forking = unittest.skipUnless(HAVE_FORKING, 'requires forking')
def signal_alarm(n):
"""Call signal.alarm when it exists (i.e. not on Windows)."""
if hasattr(signal, 'alarm'):
signal.alarm(n)
# Remember real select() to avoid interferences with mocking
_real_select = select.select
def receive(sock, n, timeout=20):
r, w, x = _real_select([sock], [], [], timeout)
if sock in r:
return sock.recv(n)
else:
raise RuntimeError("timed out on %r" % (sock,))
if HAVE_UNIX_SOCKETS and HAVE_FORKING:
class ForkingUnixStreamServer(socketserver.ForkingMixIn,
socketserver.UnixStreamServer):
pass
class ForkingUnixDatagramServer(socketserver.ForkingMixIn,
socketserver.UnixDatagramServer):
pass
@contextlib.contextmanager
def simple_subprocess(testcase):
"""Tests that a custom child process is not waited on (Issue 1540386)"""
pid = os.fork()
if pid == 0:
# Don't raise an exception; it would be caught by the test harness.
os._exit(72)
try:
yield None
except:
raise
finally:
pid2, status = os.waitpid(pid, 0)
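        # waitpid() packs the child's exit code into the high byte of the status
        # word, hence the expected value 72 << 8 below.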
testcase.assertEqual(pid2, pid)
testcase.assertEqual(72 << 8, status)
class SocketServerTest(unittest.TestCase):
"""Test all socket servers."""
def setUp(self):
signal_alarm(60) # Kill deadlocks after 60 seconds.
self.port_seed = 0
self.test_files = []
def tearDown(self):
signal_alarm(0) # Didn't deadlock.
reap_children()
for fn in self.test_files:
try:
os.remove(fn)
except OSError:
pass
self.test_files[:] = []
def pickaddr(self, proto):
if proto == socket.AF_INET:
return (HOST, 0)
else:
# XXX: We need a way to tell AF_UNIX to pick its own name
# like AF_INET provides port==0.
dir = None
fn = tempfile.mktemp(prefix='unix_socket.', dir=dir)
self.test_files.append(fn)
return fn
def make_server(self, addr, svrcls, hdlrbase):
class MyServer(svrcls):
def handle_error(self, request, client_address):
self.close_request(request)
raise
class MyHandler(hdlrbase):
def handle(self):
line = self.rfile.readline()
self.wfile.write(line)
if verbose: print("creating server")
try:
server = MyServer(addr, MyHandler)
except PermissionError as e:
# Issue 29184: cannot bind() a Unix socket on Android.
self.skipTest('Cannot create server (%s, %s): %s' %
(svrcls, addr, e))
self.assertEqual(server.server_address, server.socket.getsockname())
return server
@reap_threads
def run_server(self, svrcls, hdlrbase, testfunc):
server = self.make_server(self.pickaddr(svrcls.address_family),
svrcls, hdlrbase)
# We had the OS pick a port, so pull the real address out of
# the server.
addr = server.server_address
if verbose:
print("ADDR =", addr)
print("CLASS =", svrcls)
t = threading.Thread(
name='%s serving' % svrcls,
target=server.serve_forever,
# Short poll interval to make the test finish quickly.
# Time between requests is short enough that we won't wake
# up spuriously too many times.
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
t.start()
if verbose: print("server running")
for i in range(3):
if verbose: print("test client", i)
testfunc(svrcls.address_family, addr)
if verbose: print("waiting for server")
server.shutdown()
t.join()
server.server_close()
self.assertEqual(-1, server.socket.fileno())
if HAVE_FORKING and isinstance(server, socketserver.ForkingMixIn):
# bpo-31151: Check that ForkingMixIn.server_close() waits until
# all children completed
self.assertFalse(server.active_children)
if verbose: print("done")
def stream_examine(self, proto, addr):
with socket.socket(proto, socket.SOCK_STREAM) as s:
s.connect(addr)
s.sendall(TEST_STR)
buf = data = receive(s, 100)
while data and b'\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEqual(buf, TEST_STR)
def dgram_examine(self, proto, addr):
with socket.socket(proto, socket.SOCK_DGRAM) as s:
if HAVE_UNIX_SOCKETS and proto == socket.AF_UNIX:
s.bind(self.pickaddr(proto))
s.sendto(TEST_STR, addr)
buf = data = receive(s, 100)
while data and b'\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEqual(buf, TEST_STR)
def test_TCPServer(self):
self.run_server(socketserver.TCPServer,
socketserver.StreamRequestHandler,
self.stream_examine)
def test_ThreadingTCPServer(self):
self.run_server(socketserver.ThreadingTCPServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_forking
def test_ForkingTCPServer(self):
with simple_subprocess(self):
self.run_server(socketserver.ForkingTCPServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_unix_sockets
def test_UnixStreamServer(self):
self.run_server(socketserver.UnixStreamServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_unix_sockets
def test_ThreadingUnixStreamServer(self):
self.run_server(socketserver.ThreadingUnixStreamServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_unix_sockets
@requires_forking
def test_ForkingUnixStreamServer(self):
with simple_subprocess(self):
self.run_server(ForkingUnixStreamServer,
socketserver.StreamRequestHandler,
self.stream_examine)
def test_UDPServer(self):
self.run_server(socketserver.UDPServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
def test_ThreadingUDPServer(self):
self.run_server(socketserver.ThreadingUDPServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_forking
def test_ForkingUDPServer(self):
with simple_subprocess(self):
self.run_server(socketserver.ForkingUDPServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_unix_sockets
def test_UnixDatagramServer(self):
self.run_server(socketserver.UnixDatagramServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_unix_sockets
def test_ThreadingUnixDatagramServer(self):
self.run_server(socketserver.ThreadingUnixDatagramServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_unix_sockets
@requires_forking
def test_ForkingUnixDatagramServer(self):
self.run_server(ForkingUnixDatagramServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@reap_threads
def test_shutdown(self):
# Issue #2302: shutdown() should always succeed in making an
# other thread leave serve_forever().
class MyServer(socketserver.TCPServer):
pass
class MyHandler(socketserver.StreamRequestHandler):
pass
threads = []
for i in range(20):
s = MyServer((HOST, 0), MyHandler)
t = threading.Thread(
name='MyServer serving',
target=s.serve_forever,
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
threads.append((t, s))
for t, s in threads:
t.start()
s.shutdown()
for t, s in threads:
t.join()
s.server_close()
def test_close_immediately(self):
class MyServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
server = MyServer((HOST, 0), lambda: None)
server.server_close()
def test_tcpserver_bind_leak(self):
# Issue #22435: the server socket wouldn't be closed if bind()/listen()
# failed.
# Create many servers for which bind() will fail, to see if this result
# in FD exhaustion.
for i in range(1024):
with self.assertRaises(OverflowError):
socketserver.TCPServer((HOST, -1),
socketserver.StreamRequestHandler)
def test_context_manager(self):
with socketserver.TCPServer((HOST, 0),
socketserver.StreamRequestHandler) as server:
pass
self.assertEqual(-1, server.socket.fileno())
class ErrorHandlerTest(unittest.TestCase):
"""Test that the servers pass normal exceptions from the handler to
handle_error(), and that exiting exceptions like SystemExit and
KeyboardInterrupt are not passed."""
def tearDown(self):
        os_helper.unlink(os_helper.TESTFN)
def test_sync_handled(self):
BaseErrorTestServer(ValueError)
self.check_result(handled=True)
def test_sync_not_handled(self):
with self.assertRaises(SystemExit):
BaseErrorTestServer(SystemExit)
self.check_result(handled=False)
def test_threading_handled(self):
ThreadingErrorTestServer(ValueError)
self.check_result(handled=True)
def test_threading_not_handled(self):
ThreadingErrorTestServer(SystemExit)
self.check_result(handled=False)
@requires_forking
def test_forking_handled(self):
ForkingErrorTestServer(ValueError)
self.check_result(handled=True)
@requires_forking
def test_forking_not_handled(self):
ForkingErrorTestServer(SystemExit)
self.check_result(handled=False)
def check_result(self, handled):
        with open(os_helper.TESTFN) as log:
expected = 'Handler called\n' + 'Error handled\n' * handled
self.assertEqual(log.read(), expected)
class BaseErrorTestServer(socketserver.TCPServer):
def __init__(self, exception):
self.exception = exception
super().__init__((HOST, 0), BadHandler)
with socket.create_connection(self.server_address):
pass
try:
self.handle_request()
finally:
self.server_close()
self.wait_done()
def handle_error(self, request, client_address):
        with open(os_helper.TESTFN, 'a') as log:
log.write('Error handled\n')
def wait_done(self):
pass
class BadHandler(socketserver.BaseRequestHandler):
def handle(self):
        with open(os_helper.TESTFN, 'a') as log:
log.write('Handler called\n')
raise self.server.exception('Test error')
class ThreadingErrorTestServer(socketserver.ThreadingMixIn,
BaseErrorTestServer):
def __init__(self, *pos, **kw):
self.done = threading.Event()
super().__init__(*pos, **kw)
def shutdown_request(self, *pos, **kw):
super().shutdown_request(*pos, **kw)
self.done.set()
def wait_done(self):
self.done.wait()
if HAVE_FORKING:
class ForkingErrorTestServer(socketserver.ForkingMixIn, BaseErrorTestServer):
pass
class SocketWriterTest(unittest.TestCase):
def test_basics(self):
class Handler(socketserver.StreamRequestHandler):
def handle(self):
self.server.wfile = self.wfile
self.server.wfile_fileno = self.wfile.fileno()
self.server.request_fileno = self.request.fileno()
server = socketserver.TCPServer((HOST, 0), Handler)
self.addCleanup(server.server_close)
s = socket.socket(
server.address_family, socket.SOCK_STREAM, socket.IPPROTO_TCP)
with s:
s.connect(server.server_address)
server.handle_request()
self.assertIsInstance(server.wfile, io.BufferedIOBase)
self.assertEqual(server.wfile_fileno, server.request_fileno)
def test_write(self):
# Test that wfile.write() sends data immediately, and that it does
# not truncate sends when interrupted by a Unix signal
pthread_kill = test.support.get_attribute(signal, 'pthread_kill')
class Handler(socketserver.StreamRequestHandler):
def handle(self):
self.server.sent1 = self.wfile.write(b'write data\n')
# Should be sent immediately, without requiring flush()
self.server.received = self.rfile.readline()
big_chunk = b'\0' * test.support.SOCK_MAX_SIZE
self.server.sent2 = self.wfile.write(big_chunk)
server = socketserver.TCPServer((HOST, 0), Handler)
self.addCleanup(server.server_close)
interrupted = threading.Event()
def signal_handler(signum, frame):
interrupted.set()
original = signal.signal(signal.SIGUSR1, signal_handler)
self.addCleanup(signal.signal, signal.SIGUSR1, original)
response1 = None
received2 = None
main_thread = threading.get_ident()
def run_client():
s = socket.socket(server.address_family, socket.SOCK_STREAM,
socket.IPPROTO_TCP)
with s, s.makefile('rb') as reader:
s.connect(server.server_address)
nonlocal response1
response1 = reader.readline()
s.sendall(b'client response\n')
reader.read(100)
# The main thread should now be blocking in a send() syscall.
# But in theory, it could get interrupted by other signals,
# and then retried. So keep sending the signal in a loop, in
# case an earlier signal happens to be delivered at an
# inconvenient moment.
while True:
pthread_kill(main_thread, signal.SIGUSR1)
if interrupted.wait(timeout=float(1)):
break
nonlocal received2
received2 = len(reader.read())
background = threading.Thread(target=run_client)
background.start()
server.handle_request()
background.join()
self.assertEqual(server.sent1, len(response1))
self.assertEqual(response1, b'write data\n')
self.assertEqual(server.received, b'client response\n')
self.assertEqual(server.sent2, test.support.SOCK_MAX_SIZE)
self.assertEqual(received2, test.support.SOCK_MAX_SIZE - 100)
class MiscTestCase(unittest.TestCase):
def test_all(self):
# objects defined in the module should be in __all__
expected = []
for name in dir(socketserver):
if not name.startswith('_'):
mod_object = getattr(socketserver, name)
if getattr(mod_object, '__module__', None) == 'socketserver':
expected.append(name)
self.assertCountEqual(socketserver.__all__, expected)
def test_shutdown_request_called_if_verify_request_false(self):
# Issue #26309: BaseServer should call shutdown_request even if
# verify_request is False
class MyServer(socketserver.TCPServer):
def verify_request(self, request, client_address):
return False
shutdown_called = 0
def shutdown_request(self, request):
self.shutdown_called += 1
socketserver.TCPServer.shutdown_request(self, request)
server = MyServer((HOST, 0), socketserver.StreamRequestHandler)
s = socket.socket(server.address_family, socket.SOCK_STREAM)
s.connect(server.server_address)
s.close()
server.handle_request()
self.assertEqual(server.shutdown_called, 1)
server.server_close()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_threads_reaped(self):
"""
In #37193, users reported a memory leak
due to the saving of every request thread. Ensure that
not all threads are kept forever.
"""
class MyServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
server = MyServer((HOST, 0), socketserver.StreamRequestHandler)
for n in range(10):
with socket.create_connection(server.server_address):
server.handle_request()
self.assertLess(len(server._threads), 10)
server.server_close()
if __name__ == "__main__":
unittest.main()
|
watchdog.py
|
# -*- coding: utf-8 -*-
from kazoo.client import KazooClient
import os
import sys
import logging
import time
import signal
from multiprocessing import Process
main_dir = "obj"
signal_dir = '/signal/suning'
task_type = "suning"
def run_proc():
os.chdir(main_dir +"suning/suning/spiders")
#arg = ["HELLO","crawl", "spider_" + task_type,"--nolog"]
arg = ["HELLO","crawl", "spider_" + task_type]
os.execvp("scrapy",arg)
def run_wait(a,b):
try:
os.waitpid(-1, os.WNOHANG)
    except Exception as e:
        print("no child")
signal.signal(signal.SIGCHLD, run_wait)
watchPid = []
for i in range(1,len(sys.argv)):
watchPid.append(int(sys.argv[i]))
hosts_list = ['123.206.89.123:2181', '123.207.157.135:2181', '118.89.234.46:2181']
signal_dic = {"stop":signal.SIGKILL, "start":signal.SIGCONT, "pause":signal.SIGSTOP, "continue":signal.SIGCONT}
zk = KazooClient(hosts = hosts_list)
logging.basicConfig()
zk.start()
print "watch dog working"
stop_flag = False
@zk.ChildrenWatch(signal_dir)
def signal_watch(children):
if len(children) != 0:
global watchPid
for pid in watchPid:
os.kill(pid, signal_dic[children[0]])
if children[0] == "stop":
global stop_flag
stop_flag = True
def check(pid):
global stop_flag
if stop_flag == True:
sys.exit(0)
try:
os.kill(pid, 0)
return pid
    except Exception:  # process no longer exists; respawn it
p = Process(target=run_proc)
p.start()
return p.pid
while True:
print "begin check"
global stop_flag
if stop_flag == True:
sys.exit(0)
for pid in watchPid:
newpid = check(pid)
if stop_flag == True:
sys.exit(0)
if newpid != pid:
print "new process"
watchPid.remove(pid)
watchPid.append(newpid)
time.sleep(5)
|
kuJjikopalambhan.py
|
from keyboard import (
add_hotkey,
on_press,
on_release_key,
all_modifiers,
on_press_key,
write,
press_and_release,
unhook_all as key_unhook,
)
from mouse import on_click, on_middle_click, on_right_click, unhook_all as mouse_unhook
from time import time, sleep
from threading import Thread
from json import loads
# sarve_bhAShA
sarve_bhAShA = "#$',-.0123456789;ACDEGHIJKLMNOQRSTUWYabcdefghijklmnopqrstuvwxyz"
# sarve_bhAShA
class varna:
def __init__(self, v, t):
self.name = v
self.time = time()
class kuYjikolambhikam:
def __init__(self, obj):
self.obj = obj
self.ks = obj.get("ks")
self.get = lambda x, v=0: obj.get(x, v)
th = Thread(target=self.__check_value_updates)
th.daemon = True
th.start()
self.main = parivartana(self)
self.modifier_press_status = (False, "", 0)
self.last_time = time()
self.shortcut_press = False
self.single_alt = True
self.vrn = varna("", -1)
def __check_value_updates(self):
self.__start_all_listeners()
self.t = 0
t = 0.1
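        # self.t accumulates idle seconds while typing mode is on (ks == 1) and is reset on
        # every processed key; after roughly 600 s of inactivity typing mode is switched off below.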
while True:
a = self.get("get_val_change")[1]
if a:
self.update()
self.get("set_val_change", 1)
else:
sleep(t)
if self.ks == 1:
self.t += t
if self.t > 600.0 and self.ks == 1:
self.ks = 0
self.t = 0
self.get("time_exceed")
self.get("change_less")
def time_elphased(self, t=time()):
elph = t - self.last_time
self.last_time = t
return elph
def __start_all_listeners(self):
def change():
self.ks = abs(self.ks - 1)
if self.main.sg_status and self.main.sg:
self.get("hide_sg")
self.main.sg_status = False
self.get("change_less")
on_release_key("windows", self.on_release)
on_release_key("shift", self.on_release)
on_release_key("alt", self.on_release)
on_release_key("ctrl", self.on_release)
add_hotkey("ctrl+cmd", change)
add_hotkey("windows+f6", lambda: press_and_release("volume up"))
add_hotkey("windows+f5", lambda: press_and_release("volume down"))
add_hotkey("windows+esc", lambda: self.get("close_from"))
for x in sarve_bhAShA:
on_press_key(x, self.process_key, suppress=True)
on_press(self.detect_key)
on_click(lambda: self.clear(mouse=True))
on_middle_click(lambda: self.clear(mouse=True))
on_right_click(lambda: self.clear(mouse=True))
def on_release(self, c):
self.single_alt = True
if self.modifier_press_status[0]:
self.modifier_press_status = (False, "", 0)
self.shortcut_press = False
def detect_key(self, key):
tm = key.time
key = key.name
if key == None:
return
more = True
if self.modifier_press_status[0] and key != self.modifier_press_status[1]:
self.shortcut_press = True
if (
key == "backspace"
and self.main.pUrva_lekhit[2][1] == 1
and self.main.sa_lang == 1
and self.ks == 1
):
send_keys("backspace")
self.clear()
elif (
self.ks == 1
and self.main.current_lang_code == "Urdu"
and key in ("?",)
):
send_keys("backspace")
write(self.main.aksharANI[key][key][0])
self.clear()
elif "shift" in key or "caps" in key or self.modifier_press_status[0]:
more = False
else:
self.clear()
if key in all_modifiers and not self.modifier_press_status[0]:
self.modifier_press_status = (True, key, tm)
if self.main.sg_status and self.main.sg and self.ks == 1 and more:
self.get("hide_sg")
self.main.sg_status = False
if (
"alt" in key
and tm - self.modifier_press_status[2] >= 1
and not self.shortcut_press
and self.single_alt
):
self.get("show_status")
self.single_alt = False
def process_key(self, key, already_ready=False):
self.t = 0
t = self.time_elphased(key.time)
if t > 15:
self.clear()
key = key.name if not already_ready else key
if len(key) > 1:
send_keys(key)
if self.ks == 1:
self.clear()
elif (
self.modifier_press_status[0]
and "shift" not in self.modifier_press_status[1]
):
self.shortcut_press = True
press_and_release(key)
self.clear()
elif self.ks == 0:
send_keys(key)
else:
self.main.prakriyA(key)
def clear(self, mouse=False):
if mouse:
if self.get("img_pressed"):
self.get("reset_img_pressed")
return
elif self.get("varna_pressed"):
self.get("reset_varna_pressed")
return
if self.ks == 0:
return
self.main.clear_all_val(True)
def update(self):
m = self.get("msg")
if len(m) == 0:
return
for x in m:
if x == "update_ks":
self.ks = self.get("ks")
if self.ks == 1:
self.t = 0
elif x == "clear_vals":
self.clear()
elif x == "change_lang":
self.main.set_typing_lang(self.get("lang"))
elif x == "update_sa":
self.main.sa_lang = self.get("sa")
elif x == "sg":
self.main.sg = not self.main.sg
elif x == "clear_sg":
self.main.capital = [0, "", -1, -1, 0, 0, False]
elif x == "clicked":
val = self.get("clicked")
l = len(val)
v = 0
for y in val:
v += 1
if v == l:
self.get("reset_no_procedure")
self.vrn.name = y
self.vrn.time = time()
self.process_key(self.vrn)
elif x == "restart":
key_unhook()
mouse_unhook()
self.__start_all_listeners()
self.get("null_msg")
def send_keys(key):
if len(key) == 1:
write(key)
else:
press_and_release(key)
class parivartana:
def __init__(self, main):
self.main = main
self.loaded_scripts = []
self.akSharAH = {}
self.sa_lang = main.obj.get("sa")
lang = main.obj.get("lang")
self.first = True
self.set_typing_lang(lang)
if self.aksharANI["sa"] == 1 and self.sa_lang == 0:
self.sa_lang = 1
self.back_space = 0
self.sg_status = False
self.sg = self.main.get("sg_status")
self.small_to_capital = False
# Format of varNa = [key record, output, whats on screen]
self.varNa = ["", "", ""]
self.next_chars = ""
self.mAtrA_sthiti = False
self.halanta_add_status = False
# index,key itself,its varna sthiti,varna sthiti of char preceding it,current time,whats on screen its length
self.capital = [0, "", -1, -1, 0, 0, False]
self.d = False # for ch and chh like where c is free
self.store_last_of_3 = ""
self.last_of_3_status_for_mAtrA = False
self.special_ved_s = False
self.pUrva_lekhit = [["", -1], ["", -1], ["", -1], ["", -1], ["", -1]]
self.second_cap_time = 0
def set_typing_lang(self, lang):
if lang not in self.loaded_scripts:
fl = open(
f"resources/dattAMsh/{lang}.json",
encoding="utf-8",
mode="r+",
)
self.akSharAH[lang] = loads(fl.read())
fl.close()
self.loaded_scripts.append(lang)
self.aksharANI = self.akSharAH[lang]
self.current_lang_code = lang
self.sarve = [c for c in self.aksharANI]
if lang not in ("Urdu", "Romanized"):
self.halant = self.aksharANI["."][".x"][0]
self.sa_lang = self.aksharANI["sa"]
self.first = False
def prakriyA(self, key):
if self.next_chars == "" and key in self.sarve:
self.varNa[2] = ""
self.vitaraNa(key)
elif self.next_chars == "" and key.isupper() and key.lower() in self.sarve:
self.varNa[2] = ""
self.vitaraNa(key.lower())
elif self.next_chars != "":
if key in self.next_chars:
if self.d:
self.halanta_add_status = True
self.d = False
self.varNa[2] = self.varNa[1]
key = self.varNa[0] + key
self.vitaraNa(key)
elif key == "q" or key == ";":
self.main.get("hide_sg")
self.clear_all_val(True)
elif key in self.sarve: # resseting if next chars not continuable
self.clear_all_val()
if (
self.store_last_of_3 != ""
and self.pUrva_lekhit[4][1] != 0
and key == "#"
and self.current_lang_code == "Tamil-Extended"
):
self.clear_all_val(True)
self.vitaraNa(key)
elif key.isupper() and key.lower() in self.sarve:
self.clear_all_val()
self.vitaraNa(key.lower())
else:
send_keys(key)
self.clear_all_val(True)
else:
send_keys(key)
self.clear_all_val(True)
def vitaraNa(self, key):
cap_0_from_1 = [False, ["", -1]]
data = self.aksharANI[key[0]]
current = data[key]
prev_temp = self.pUrva_lekhit[3][1]
temp = self.pUrva_lekhit[4][1]
varna_sthiti = current[-1]
if self.capital[0] == 2:
if key == self.capital[1]:
if time() - self.capital[4] <= 4.0:
# converting small to capital
key = key.upper()
data = self.aksharANI[key]
current = data[key]
temp = self.capital[3]
varna_sthiti = current[-1]
self.back_space += 2 * self.capital[5]
if varna_sthiti == 0 and self.pUrva_lekhit[2][1] in [1, 3]:
cap_0_from_1 = [True, self.pUrva_lekhit[2]]
if self.capital[6]:
self.back_space -= 1
if self.sa_lang == 1:
if self.capital[3] == 1:
self.back_space -= 1
if self.capital[2] == 1:
self.back_space += 2
if self.capital[3] != 1 and self.capital[2] == 1:
self.back_space -= 1
if self.sa_lang == 0:
if self.capital[2] == 1:
self.back_space += 1
if self.capital[3] == 1 and not self.capital[6]:
self.back_space += 1
if self.sa_lang == 1 and self.capital[3] == 3:
self.back_space -= 1
self.capital = [3, "", -1, -1, 0, 0, False]
else:
self.capital = [
2,
key,
varna_sthiti,
self.pUrva_lekhit[3][1],
self.second_cap_time,
self.capital[5],
False,
]
else:
self.capital = [0, "", -1, -1, 0, 0, False]
if self.mAtrA_sthiti and varna_sthiti in [1, 2]: # for AUM like
self.clear_all_val(True)
self.prakriyA(key[-1])
return
# taking information of varana as [1 for svara and 0 others,max limit]
self.varNa[0] = key
# storing the state of varṇa i.e, svara or vyanjana
# taking out output letter in [1]
self.varNa[1] = current[0]
if temp in [1, 3]:
if (
varna_sthiti == 1
and key[-1] not in self.next_chars
and self.sa_lang == 0
): # for adding halant if char is not continuable
self.halanta_add_status = True
if temp == 3:
self.back_space += 1
self.varNa[1] = self.store_last_of_3 + self.varNa[1]
elif varna_sthiti == 0:
self.mAtrA_sthiti = True
if self.capital[0] == 1:
if key == self.capital[1]:
self.capital[0] = 2
self.second_cap_time = time()
elif (
key[-1] == self.capital[1]
and self.aksharANI[self.capital[1].upper()][self.capital[1].upper()][0]
!= self.varNa[1]
):
self.capital[6] = True
self.capital[0] = 2
self.capital[2] = varna_sthiti
self.capital[5] = len(self.varNa[1])
self.second_cap_time = time()
else:
self.capital = [0, "", -1, -1, 0, 0, False]
if (key == "LR" or key == "r3") and varna_sthiti == 0:
if prev_temp != 1:
self.mAtrA_sthiti = False
elif self.sa_lang == 0:
self.back_space += 1
if (
key == "R"
and self.current_lang_code == "Tamil"
and varna_sthiti == 2
and temp == 1
):
self.back_space += 1
if self.mAtrA_sthiti:
self.varNa[1] = current[1]
if self.sa_lang == 1 and temp == 1:
self.back_space += 1
if temp == 3:
self.back_space += 1
if self.sa_lang == 1:
self.back_space += 1
self.varNa[1] += self.store_last_of_3
self.last_of_3_status_for_mAtrA = True
elif temp == 0 and self.last_of_3_status_for_mAtrA:
self.varNa[1] += self.store_last_of_3
self.last_of_3_status_for_mAtrA = True
if (
self.current_lang_code == "Tamil-Extended"
and key == "M"
and (
(
(
prev_temp == 3
or (prev_temp == 0 and self.pUrva_lekhit[2][1] == 3)
)
and temp == 0
)
or (
self.capital[0] == 3
and (
(
self.pUrva_lekhit[1][1] == 3
or (
self.pUrva_lekhit[1][1] == 0
and self.pUrva_lekhit[0][1] == 3
)
)
and self.pUrva_lekhit[2][1] == 0
)
)
)
):
self.varNa[1] += self.store_last_of_3
self.back_space += 1
if self.current_lang_code == "Tamil-Extended" and key in ["#an", "#s"]:
if key == "#an" and (
(
self.pUrva_lekhit[1][1] == 3
or (self.pUrva_lekhit[1][1] == 0 and self.pUrva_lekhit[0][1] == 3)
)
and self.pUrva_lekhit[2][1] == 0
):
self.varNa[1] += self.store_last_of_3
self.back_space += 1
elif key == "#s" and (
(
self.pUrva_lekhit[2][1] == 3
or (self.pUrva_lekhit[2][1] == 0 and self.pUrva_lekhit[1][1] == 3)
)
and self.pUrva_lekhit[3][1] == 0
):
self.varNa[1] += self.store_last_of_3
self.back_space += 1
self.special_ved_s = True
if (
self.current_lang_code == "Tamil-Extended"
and key in ["#ss", "#sss"]
and self.special_ved_s
):
self.varNa[1] += self.store_last_of_3
if (
temp == 1
and varna_sthiti == 2
and len(key) == 1
and len(data) > 1
and self.sa_lang == 0
):
# for ch, chh for where c is nothing
for x in current[-2]:
if key + x in data:
if data[key + x][-1] == 1:
self.d = True
break
if (
("Tamil" in self.current_lang_code or self.current_lang_code == "Punjabi")
and key in ["R", "LR", "LRR", "RR"]
and varna_sthiti == 1
):
varna_sthiti = 2
if self.sa_lang == 1:
if varna_sthiti == 1:
self.varNa[1] += self.halant
elif varna_sthiti == 3:
self.varNa[1] = self.varNa[1][:-1] + \
self.halant + self.varNa[1][-1]
self.likha(
self.varNa[1], self.varNa[2], self.back_space, self.halanta_add_status
)
if self.capital[0] == 3:
self.capital = [0, "", -1, -1, 0, 0, False]
if (
len(key) == 1
and key.islower()
and key.upper() in self.aksharANI
and self.capital[0] == 0
):
a = [0, "", -1, -1, 0, 0, False]
b = [1, key, varna_sthiti, temp, time(), len(self.varNa[1]), False]
if key + key in data:
if self.aksharANI[key.upper()][key.upper()][0] != data[key + key][0]:
self.capital = b
else:
self.capital = a
else:
self.capital = b
self.next_chars = current[-2]
if self.sg:
a = {
"key": (key, self.next_chars),
"status": (temp, varna_sthiti),
"mAtrA": self.mAtrA_sthiti,
"special_cap": cap_0_from_1,
}
if self.capital[0] == 1:
a["cap"] = True
if key != ".":
self.main.get("show_sg", a)
else:
def jkl():
self.main.get("show_sg", a)
th = Thread(target=jkl)
th.daemon = True
th.start()
self.sg_status = True
elif self.sg_status and self.sg:
self.main.get("hide_sg")
self.sg_status = False
if varna_sthiti == 3:
self.store_last_of_3 = self.varNa[1][-1]
if self.next_chars == "":
self.clear_all_val()
self.pUrva_lekhit[0] = self.pUrva_lekhit[1]
self.pUrva_lekhit[1] = self.pUrva_lekhit[2]
self.pUrva_lekhit[2] = self.pUrva_lekhit[3]
self.pUrva_lekhit[3] = self.pUrva_lekhit[4]
self.pUrva_lekhit[4] = [key, varna_sthiti]
def clear_all_val(self, special=False):
self.next_chars = ""
self.varNa = ["", "", ""]
self.mAtrA_sthiti = False
self.last_of_3_status_for_mAtrA = False
self.special_ved_s = False
self.back_space = 0
if special:
self.store_last_of_3 = ""
self.pUrva_lekhit = [["", -1], ["", -1],
["", -1], ["", -1], ["", -1]]
self.main.get("hide_sg")
self.main.get("clear_sg_val")
self.capital = [0, "", -1, -1, 0, 0, False]
def likha(self, b, a, bk=0, hal=False):
# a = what is currently on screen
# b = it is that to which a has to be replaced
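        # e.g. if a == "abc" and b == "abd", the common prefix "ab" is kept,
        # so back ends up 1 and lekha ends up "d" (one backspace, then write "d")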
back = 0
lekha = ""
if a == "" or b == "":
lekha = b
back = len(a)
elif b[0] != a[0]:
lekha = b
back = len(a)
else:
x = 0
for v in a:
if len(b) == x:
break
if b[x] != a[x]:
break
x += 1
lekha = b[x:]
back = len(a) - x
        back += bk  # taking extra backspaces into consideration
for m in range(back):
press_and_release("backspace")
if hal:
lekha = self.halant + lekha
self.back_space = 0
self.halanta_add_status = False
write(lekha)
|
main.py
|
import multiprocessing
import queue
import socketio
import sys
import signal
import argparse
import magcalibration
import cmdui
ap = argparse.ArgumentParser()
ap.add_argument("-f","--filename",required=False,help='Filename of csv mag data formatted as mx,my,mz',type=str,default='')
ap.add_argument("-s",'--server',required=False,help='address of backend',type=str,default='localhost')
ap.add_argument("-p","--port",required=False,help='port of backend',type=int,default=1337)
args = vars(ap.parse_args())
def startNetworkedMagCalApp(msgQ,
server='localhost',
port=1337):
magcal = magcalibration.MagCalibration(msgQ,server,port)
sio = socketio.Client()
sio.connect('http://'+server+':'+str(port),namespaces=['/telemetry'])
@sio.on('telemetry',namespace='/telemetry')
def on_telemetry(data):
magcal.updateData(data)
magcal.run()
def startFileMagCalApp(msgQ,
server:str=None,
port:int=None,
filename:str=''):
magcal = magcalibration.MagCalibration(msgQ,server,port,filename)
magcal.run()
if __name__ == '__main__':
multiprocessing.set_start_method('spawn',force=True) # must use spawn instead of fork on macos due to fork security in objc runtime
messageQueue = multiprocessing.Queue(10)
if args["filename"] != '':
p = multiprocessing.Process(target=startFileMagCalApp,args=(messageQueue,args["server"],args["port"],args["filename"],))
else:
p = multiprocessing.Process(target=startNetworkedMagCalApp,args=(messageQueue,args["server"],args["port"],))
p.start()
cmdui = cmdui.MagCalCmdUI(messageQueue)
cmdui.cmdloop()
p.terminate()
p.join()
sys.exit(0)
|
freeciv_bot.py
|
#!/usr/bin/python3
# -*- coding: latin-1 -*-
'''
@package freeciv_bot
'''
import socket
import sys
import struct
import array
import zlib
import io
import re
import asyncio
import threading
import time
from argparse import ArgumentParser
from datetime import datetime
PJOIN_REQ = 4
PSTART = 0
JOINED = 333
JOIN_REPLY = 5
AUTH_REP = 7
PING = 88
PONG = 89
BEGIN_TURN = 128
TIMEOUT_INFO = 244
GAME_INFO = 16
PAGE_MSG = 110
PAGE_MSG_PART = 248
SEND_CHAT = 26
JUMBO_SIZE = 65535
COMPRESSION_BORDER = 16*1024+1
JUMBO_BORDER = 64*1024-COMPRESSION_BORDER-1
JUMBO_LEN = 4
HEADER_LEN = 2
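# Wire framing as handled in get_block() below: every packet starts with a 2-byte length header;
# a header equal to JUMBO_SIZE (65535) means a 4-byte overall length follows and the body is
# zlib-compressed, and headers >= COMPRESSION_BORDER also mark a zlib-compressed body.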
VERSION_20 = '+2.0 conn_ping_info username_info new_hack ReportFreezeFix AttrSerialFix extroutes extgameinfo exttechleakage voteinfo extglobalinfo+'
VERSION_25 = '+Freeciv-2.5-network nationset_change tech_cost split_reports extended_move_rate illness_ranges nonnatdef cheaper_small_wonders+'
VERSION_26 = '+Freeciv-2.6-network techloss_forgiveness+'
header_struct = struct.Struct('!H')
jumbo_struct = struct.Struct('!I')
prelogin_struct = struct.Struct('!HB')
frame_struct = struct.Struct('!HBB')
processing_started = struct.Struct('!c')
join_reply_struct = struct.Struct('!BsssI')
bool_struct = struct.Struct('!B')
int_struct = struct.Struct('!I')
float_struct = struct.Struct('!f')
double_struct = struct.Struct('!d')
char_struct = struct.Struct('!s')
send_from_now = False
speak = False
class race_db():
def __init__(self):
self.to_discord = []
self.timer_target = -1
self._lock = threading.Lock()
def append_discord_message(self, message):
with self._lock:
self.to_discord.append(message)
def pop_discord_message(self):
with self._lock:
msg = self.to_discord.pop(0)
return msg
def discord_len(self):
with self._lock:
z = len(self.to_discord)
return z
def get_timer_target(self):
with self._lock:
x = self.timer_target
return x
def set_timer_target(self, timer):
with self._lock:
self.timer_target = timer
ricer = race_db()
def unpack_bool(fbytes):
bbumbo = fbytes.read(1)
(by, ) = bool_struct.unpack(bbumbo)
return by
def unpack_string(fbytes):
blocks = []
while True:
bbumbo = fbytes.read(1)
if bbumbo == b'':
break
(by, ) = char_struct.unpack(bbumbo)
if by == b'\x00' or by == b'\x03':
break
blocks.append(by)
return b''.join(blocks).decode('ascii')
# freeciv float is int/100, lol
def unpack_float(fbytes):
bbumbo = fbytes.read(4)
(by, ) = int_struct.unpack(bbumbo)
by = by/100
return by
def unpack_double(fbytes):
bbumbo = fbytes.read(8)
(by, ) = double_struct.unpack(bbumbo)
return by
def unpack_int(fbytes):
bbumbo = fbytes.read(4)
(by, ) = struct.unpack('i', bbumbo)
return by
def say_hello(msg):
global sock_d
msg = msg.rstrip('[/c]')
msg = re.sub(r'\[c[^)]*\]', "", msg)
msg = re.sub(">", " ", msg)
msg = re.sub("<", " ", msg)
print(msg)
rep = re.search(r"\w+(?=\s*has connected from)", msg)
if (rep):
m = "Hello " + rep.group() + " !"
send_chat_msg(sock_d, m)
send_chat_msg(sock_d, "WTF U DOING HERE???")
else:
print("WTF SAY HELLO")
def try_reply(msg):
global speak
if not speak:
return
if len(msg) < 3:
return
cases = {
"has connected from": lambda: say_hello(msg)
}
for k in cases.keys():
if re.search(k, msg):
return cases.get(k, lambda: "err")()
def process_packet(pkt):
f = io.BytesIO(pkt)
global send_from_now
global ricer
bumbo = f.read(3)
(plen, pkt_type,) = prelogin_struct.unpack(bumbo)
ret = 0
if pkt_type == JOIN_REPLY:
if unpack_bool(f) == 0:
print("Cannot join to server")
print("LOGIN MESSAGE: ", unpack_string(f))
ret = JOINED
if pkt_type == 6:
print("AUTH REQ")
ret = 6
if pkt_type == 25:
# 4 bytes header after postlogin crap?
if (f.getbuffer().nbytes) > 4:
f.read(1)
dateTimeObj = datetime.now()
s = unpack_string(f)
if (bytes(s, 'ascii') != b'\x00'):
# remove colors
#s = re.sub(r'\[[^)]*\]', "",s)
msg = "{}:{}:{} CHAT: {}".format(
dateTimeObj.hour, dateTimeObj.minute, dateTimeObj.second, s)
print(msg)
if (send_from_now):
try_reply(msg)
ricer.append_discord_message(msg)
if pkt_type == TIMEOUT_INFO:
x = f.read(1)
if x == b'\x03':
r = unpack_float(f)
ricer.append_discord_message(r)
print("TIMEOUT INFO", int(r))
timer_t = time.perf_counter() + int(r)
ricer.set_timer_target(timer_t)
if pkt_type == PING:
ret = PONG
if pkt_type == BEGIN_TURN:
ricer.append_discord_message("New turn")
print("New turn")
if pkt_type == PAGE_MSG:
f.read(1)
len_left = plen
r = "*** REPORT ***"
print(r)
ricer.append_discord_message(r)
r = unpack_string(f)
ricer.append_discord_message(r)
print(r)
len_left -= len(r)
if (len_left < 2):
return ret
r = unpack_string(f)
ricer.append_discord_message(r)
print(r)
if pkt_type == PAGE_MSG_PART:
f.read(1)
print(unpack_string(f))
return ret
# splits jumbo packet to single packets
def process_jumbo(jumbo):
f = io.BytesIO(jumbo)
x = 0
rets = []
while True:
blocks = []
bumbo = f.read(3)
if bumbo == b'':
return rets
blocks.append(bumbo)
(lenx, pkt_type,) = prelogin_struct.unpack(bumbo)
r = f.read(lenx - 3)
blocks.append(r)
rrrr = b''.join(blocks)
rets.append(process_packet(rrrr))
x += 1
return rets
def recvall(sock, length, xdecompres):
blocks = []
while length:
block = sock.recv(length)
# print('block', block)
# print('bytes left: ', length)
if not block:
            raise EOFError('socket closed: {} bytes left'.format(length))
length -= len(block)
blocks.append(block)
rep = b''.join(blocks)
if xdecompres:
#print("uncompressed :{}".format(len(rep)))
rep = zlib.decompress(rep)
#print("decompressed :{}".format(len(rep)))
return rep
# gets whole packet with given size or jumbo packet
def get_block(sock):
decompr = False
is_jumbo = False
blocks = []
data = recvall(sock, header_struct.size, False)
(block_length,) = header_struct.unpack(data)
bl = block_length
# print("HSZ", block_length)
# print("HSZ data", data)
if (block_length != JUMBO_SIZE) and (block_length < COMPRESSION_BORDER):
blocks.append(data)
if block_length == JUMBO_SIZE:
is_jumbo = True
data = recvall(sock, jumbo_struct.size, False)
(bl,) = jumbo_struct.unpack(data)
bl = bl - JUMBO_LEN
decompr = True
elif block_length >= COMPRESSION_BORDER:
decompr = True
#data = recvall(sock, header_struct.size, False)
bl = bl - COMPRESSION_BORDER
block_length = bl
y = recvall(sock, block_length - HEADER_LEN, decompr)
blocks.append(y)
return b''.join(blocks)
# sends packet to server
def put_block(sock, message):
block_length = len(message)
sock.send(header_struct.pack(block_length))
sock.send(message)
# packet header with size of packet (except jumbo packet)
def get_header(sock):
header = sock.recv(2)
x = struct.unpack('!H', header)
return x[0]
# new packet without header
def get_message(sock, len):
sock.recv(len - 2)
# replies to server ping
def send_pong(sock):
sock.sendall(put_size(pack_8bit([0, 0, PONG, ])))
# sends password to server
def send_auth(sock, password):
auth = pack_8bit([0, 0, AUTH_REP, 1]) + \
bytes(password, 'ascii') + nullbyte()
print("Sending password")
sock.sendall(put_size(auth))
# client attributes depending on server version
def ser_version(ver):
return {
20: VERSION_20,
25: VERSION_25,
26: VERSION_26
}[ver]
def pack_8bit(lista):
r = b''
for i in lista:
r = r + i.to_bytes(1, 'big')
return r
def pack_32bit(lista):
return array.array('i', lista)
def nullbyte():
null = 0
return null.to_bytes(1, 'big')
# sets packet size in first 2 bytes
def put_size(packet):
p = len(packet).to_bytes(2, 'big') + packet[2:]
return p
def send_chat_msg(sock, message):
msg = pack_8bit([0, 0, SEND_CHAT, 1]) + \
bytes(message, 'ascii') + nullbyte()
sock.sendall(put_size(msg))
def freeciv_bot(hostname, port, botname, version, password):
server_address = (hostname, port)
global sock_d
sock_d = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('connecting to {} port {}'.format(*server_address))
sock_d.connect(server_address)
global send_from_now
try:
name = bytes(botname, 'ascii')
freeciv = bytes(ser_version(version), 'ascii')
# first 2 bytes are size of packet
# 2,6,2 is just client version, works on any server
packer = pack_8bit([0, 0, PJOIN_REQ]) + name + nullbyte() + \
freeciv + nullbyte() + nullbyte() + pack_32bit([2, 6, 2])
# send name to server
sock_d.sendall(put_size(packer))
r = 0
while True:
pong = 0
#print("NR -------------------------------------:", r)
block = get_block(sock_d)
#print('Block says:'," ".join(["{:02}".format(x) for x in block]))
#print('Block says:', block.decode('ascii', 'ignore'))
if not block:
break
pong = process_jumbo(block)
# jumbo is multipacket and could be many responses needed
for rats in pong:
if rats == PONG:
send_pong(sock_d)
if rats == JOINED:
send_chat_msg(sock_d, "/detach")
if rats == 6:
send_auth(sock_d, password)
if (r > 3):
send_from_now = True
r = r + 1
finally:
print('closing socket')
sock_d.close()
async def sleeping_dog():
while True:
await asyncio.sleep(1)
async def tcp_discord_send(message, once):
global ricer
global discord_id
print('**********************************')
while(True):
writer = 0
try:
reader, writer = await asyncio.open_connection(
'127.0.0.1', 9999)
if ricer.discord_len():
pmsg = ricer.pop_discord_message()
msg = (discord_id + str(pmsg)).encode()
print("ENCODED MSG:", msg)
writer.write(msg)
await writer.drain()
else:
writer.write(discord_id.encode())
await writer.drain()
data = await reader.read(1024)
            if data != b'\x00' and data != discord_id.encode():
                discord_request = data.decode('utf-8')
                if send_from_now and discord_request != '\x00' and len(discord_request) > 1:
discord_request = discord_request.lstrip()
send_chat_msg(sock_d, discord_request)
writer.close()
await asyncio.sleep(1)
if (once):
break
except:
print("Unexpected error:", sys.exc_info()[0])
if writer:
writer.close()
if (once):
break
await asyncio.sleep(1)
def loop_in_thread(loop):
asyncio.set_event_loop(loop)
loop.run_until_complete(tcp_discord_send('', False))
async def discord(discordID):
global discord_id
print("***Starting Discord Thread***")
if (discordID != ""):
discord_id = discordID
discord_id = discord_id + "::"
await tcp_discord_send('', False)
print("***Ending Discord Thread***")
async def tc_timer():
global ricer
print("***Starting Timer Thread***")
while True:
s = time.perf_counter()
x = ricer.get_timer_target()
# there might be some big random value when connecting to server when game is not running
if (int(x) != -1 and x < 9999999):
x = x - s
x = int(x)
if x > 0 and x % 15 == 0:
m = "Time to new turn" + str(int(x))
print(m)
ricer.append_discord_message(m)
await asyncio.sleep(1)
def thread_function(discordID, loop):
asyncio.set_event_loop(loop)
loop.run_until_complete(tc_timer())
loop.close()
def thread_function2(discordID, loop):
asyncio.set_event_loop(loop)
print("DISCORD ID")
loop.run_until_complete(discord(discordID))
loop.close()
def run_forest(hostname, port, botname, version, password, discordID, spik):
global speak
global send_from_now
send_from_now = False
speak = spik
loop = asyncio.get_event_loop()
x = threading.Thread(target=thread_function, args=(discordID, loop))
x.start()
loop = asyncio.new_event_loop()
y = threading.Thread(target=thread_function2, args=(discordID, loop))
y.start()
freeciv_bot(hostname, port, botname, version, password)
if __name__ == '__main__':
parser = ArgumentParser(description='Freeciv Bot')
parser.add_argument('hostname', nargs='?', default='linuxiuvat.de',
help='freeciv server hostname (default: %(default)s)')
parser.add_argument('-p', type=int, metavar='port', default=5556,
help='TCP port number (default: %(default)s)')
parser.add_argument('-n', type=str, metavar='botname', nargs='?', default="Python",
help='Bot name (default: %(default)s)')
parser.add_argument('-password', type=str, metavar='password', nargs='?', default="",
help='Password (default: %(default)s)')
parser.add_argument('-ver', type=int, metavar='server version', default=26,
help='Server version - 20 or 25 or 26 (default: %(default)s)')
    parser.add_argument('-discordID', type=str, metavar='discordID', nargs='?', default='',
                        help='Discord ID (default: %(default)s)')
parser.add_argument('-speak', type=bool, metavar='speak', nargs='?', default='True',
help='Allow bot to speak (default: %(default)s)')
args = parser.parse_args()
run_forest(args.hostname, args.p, args.n, args.ver,
args.password, args.discordID, args.speak)
|
ultimate.py
|
# -*- coding: utf-8 -*-
import schedule
import time
import sys
import os
import random
import yaml #->added to make pics upload -> see job8
import glob #->added to make pics upload -> see job8
from tqdm import tqdm
import threading #->added to make multithreadening possible -> see fn run_threaded
sys.path.append(os.path.join(sys.path[0],'../../'))
from instabot import Bot
bot = Bot(comments_file="comments.txt")
bot.login()
bot.logger.info("ULTIMATE script. 24hours save")
comments_file_name = "comments.txt"
random_user_file = bot.read_list_from_file("username_database.txt")
random_hashtag_file = bot.read_list_from_file("hashtag_database.txt")
#to get pics and autopost it
posted_pic_list = []
try:
with open('pics.txt', 'r') as f:
posted_pic_list = f.read().splitlines()
except:
posted_pic_list = []
#!!-> to make this feature work properly, write the full/absolute path to the .jpg files as follows ->v
pics = glob.glob("/home/user/instagram/instabot/examples/ultimate_schedule/pics/*.jpg") #!!change this
pics = sorted(pics)
#end of pics processing
#fn to return random value for separate jobs
def get_random(from_list):
_random=random.choice(from_list)
print("Random from ultimate.py script is chosen: \n" + _random + "\n")
return _random
def stats(): bot.save_user_stats(bot.user_id)
def job1(): bot.like_hashtag(get_random(random_hashtag_file), amount=int(700/24))
def job2(): bot.like_timeline(amount=int(300/24))
def job3(): bot.like_followers(get_random(random_user_file), nlikes=3)
def job4(): bot.follow_followers(get_random(random_user_file))
def job5(): bot.comment_medias(bot.get_timeline_medias())
def job6(): bot.unfollow_non_followers()
def job7(): bot.follow_users(bot.get_hashtag_users(get_random(random_hashtag_file)))
def job8(): #-->fn to upload photos /auto_uploader
try:
for pic in pics:
if pic in posted_pic_list:
continue
hashtags = "/>\n#instabot #vaskokorobko #kyiv" #add custom hashtags
caption = pic[:-4].split(" ") #caption is made from the name of file
caption = " ".join(caption[1:])
caption = "\n<" + caption + hashtags #create full caption with hashtags
print("upload: " + caption)
bot.uploadPhoto(pic, caption=caption)
if bot.LastResponse.status_code != 200:
print("Smth went wrong. Read the following ->\n")
print(bot.LastResponse)
# snd msg
break
if not pic in posted_pic_list:
posted_pic_list.append(pic)
with open('pics.txt', 'a') as f:
f.write(pic + "\n")
print("Succsesfully uploaded: " + pic)
break
except Exception as e:
print(str(e))
#end of job8
#function to make threads -> details here http://bit.ly/faq_schedule
def run_threaded(job_fn):
job_thread=threading.Thread(target=job_fn)
job_thread.start()
schedule.every(1).hour.do(run_threaded, stats) #get stats
schedule.every(8).hours.do(run_threaded, job1) #like hashtag
schedule.every(2).hours.do(run_threaded, job2) #like timeline
schedule.every(1).days.at("16:00").do(run_threaded, job3) #like followers of users from file
schedule.every(2).days.at("11:00").do(run_threaded, job4) #follow followers
schedule.every(16).hours.do(run_threaded, job5) #comment medias
schedule.every(1).days.at("08:00").do(run_threaded, job6) #unfollow non-followers
schedule.every(12).hours.do(run_threaded, job7) #follow users from hashtag from file
schedule.every(1).days.at("21:28").do(run_threaded, job8) #upload pics
while True:
schedule.run_pending()
time.sleep(1)
|
word2vec.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec mini-batched skip-gram model.
Trains the model described in:
(Mikolov et al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does traditional minibatching.
The key ops used are:
* placeholder for feeding in tensors for each example.
* embedding_lookup for fetching rows from the embedding matrix.
* sigmoid_cross_entropy_with_logits to calculate the loss.
* GradientDescentOptimizer for optimizing the loss.
* skipgram custom op that does input processing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
word2vec = tf.load_op_library(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'word2vec_ops.so'))
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model and "
"training summaries.")
flags.DEFINE_string("train_data", None, "Training text file. "
"E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
"eval_data", None, "File consisting of analogies of four tokens."
"embedding 2 - embedding 1 + embedding 3 should be close "
"to embedding 4."
"See README.md for how to get 'questions-words.txt'.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 15,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.2, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 100,
"Negative samples per training example.")
flags.DEFINE_integer("batch_size", 16,
"Number of training examples processed per step "
"(size of a minibatch).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the target word.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_boolean(
"interactive", False,
"If true, enters an IPython interactive session to play with the trained "
"model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
"model.nearby([b'proton', b'elephant', b'maxwell'])")
flags.DEFINE_integer("statistics_interval", 5,
"Print statistics every n seconds.")
flags.DEFINE_integer("summary_interval", 5,
"Save training summary to file every n seconds (rounded "
"up to statistics interval).")
flags.DEFINE_integer("checkpoint_interval", 600,
"Checkpoint the model (i.e. save the parameters) every n "
"seconds (rounded up to statistics interval).")
FLAGS = flags.FLAGS
class Options(object):
"""Options used by our word2vec model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.emb_dim = FLAGS.embedding_size
# Training options.
# The training text file.
self.train_data = FLAGS.train_data
# Number of negative samples per example.
self.num_samples = FLAGS.num_neg_samples
# The initial learning rate.
self.learning_rate = FLAGS.learning_rate
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = FLAGS.epochs_to_train
# Concurrent training steps.
self.concurrent_steps = FLAGS.concurrent_steps
# Number of examples for one training step.
self.batch_size = FLAGS.batch_size
# The number of words to predict to the left and right of the target word.
self.window_size = FLAGS.window_size
# The minimum number of word occurrences for it to be included in the
# vocabulary.
self.min_count = FLAGS.min_count
# Subsampling threshold for word occurrence.
self.subsample = FLAGS.subsample
# How often to print statistics.
self.statistics_interval = FLAGS.statistics_interval
# How often to write to the summary file (rounds up to the nearest
# statistics_interval).
self.summary_interval = FLAGS.summary_interval
# How often to write checkpoints (rounds up to the nearest statistics
# interval).
self.checkpoint_interval = FLAGS.checkpoint_interval
# Where to write out summaries.
self.save_path = FLAGS.save_path
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
# Eval options.
# The text file for eval.
self.eval_data = FLAGS.eval_data
class Word2Vec(object):
"""Word2Vec model (Skipgram)."""
def __init__(self, options, session):
self._options = options
self._session = session
self._word2id = {}
self._id2word = []
self.build_graph()
self.build_eval_graph()
self.save_vocab()
def read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ")
ids = [self._word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print("Eval analogy file: ", self._options.eval_data)
print("Questions: ", len(questions))
print("Skipped: ", questions_skipped)
self._analogy_questions = np.array(questions, dtype=np.int32)
def forward(self, examples, labels):
"""Build the graph for the forward pass."""
opts = self._options
# Declare all variables we need.
# Embedding: [vocab_size, emb_dim]
init_width = 0.5 / opts.emb_dim
emb = tf.Variable(
tf.random_uniform(
[opts.vocab_size, opts.emb_dim], -init_width, init_width),
name="emb")
self._emb = emb
# Softmax weight: [vocab_size, emb_dim]. Transposed.
sm_w_t = tf.Variable(
tf.zeros([opts.vocab_size, opts.emb_dim]),
name="sm_w_t")
# Softmax bias: [vocab_size].
sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name="sm_b")
# Global step: scalar, i.e., shape [].
self.global_step = tf.Variable(0, name="global_step")
# Nodes to compute the nce loss w/ candidate sampling.
labels_matrix = tf.reshape(
tf.cast(labels,
dtype=tf.int64),
[opts.batch_size, 1])
# Negative sampling.
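# Candidate ids are drawn from the unigram counts raised to the 0.75 power (distortion),
# the standard word2vec noise distribution.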
sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
true_classes=labels_matrix,
num_true=1,
num_sampled=opts.num_samples,
unique=True,
range_max=opts.vocab_size,
distortion=0.75,
unigrams=opts.vocab_counts.tolist()))
# Embeddings for examples: [batch_size, emb_dim]
example_emb = tf.nn.embedding_lookup(emb, examples)
# Weights for labels: [batch_size, emb_dim]
true_w = tf.nn.embedding_lookup(sm_w_t, labels)
# Biases for labels: [batch_size, 1]
true_b = tf.nn.embedding_lookup(sm_b, labels)
# Weights for sampled ids: [num_sampled, emb_dim]
sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
# Biases for sampled ids: [num_sampled, 1]
sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)
# True logits: [batch_size, 1]
true_logits = tf.reduce_sum(tf.multiply(example_emb, true_w), 1) + true_b
# Sampled logits: [batch_size, num_sampled]
# We replicate sampled noise labels for all examples in the batch
# using the matmul.
sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
sampled_logits = tf.matmul(example_emb,
sampled_w,
transpose_b=True) + sampled_b_vec
return true_logits, sampled_logits
def nce_loss(self, true_logits, sampled_logits):
"""Build the graph for the NCE loss."""
# cross-entropy(logits, labels)
opts = self._options
true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.ones_like(true_logits), logits=true_logits)
sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.zeros_like(sampled_logits), logits=sampled_logits)
# NCE-loss is the sum of the true and noise (sampled words)
# contributions, averaged over the batch.
nce_loss_tensor = (tf.reduce_sum(true_xent) +
tf.reduce_sum(sampled_xent)) / opts.batch_size
return nce_loss_tensor
def optimize(self, loss):
"""Build the graph to optimize the loss function."""
# Optimizer nodes.
# Linear learning rate decay.
opts = self._options
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
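# The learning rate decays linearly with the number of words processed,
# floored at 0.01% of the initial rate.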
lr = opts.learning_rate * tf.maximum(
0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
self._lr = lr
optimizer = tf.train.GradientDescentOptimizer(lr)
train = optimizer.minimize(loss,
global_step=self.global_step,
gate_gradients=optimizer.GATE_NONE)
self._train = train
def build_eval_graph(self):
"""Build the eval graph."""
# Eval graph
# Each analogy task is to predict the 4th word (d) given three
# words: a, b, c. E.g., a=italy, b=rome, c=france, we should
# predict d=paris.
# The eval feeds three vectors of word ids for a, b, c, each of
# which is of size N, where N is the number of analogies we want to
# evaluate in one batch.
analogy_a = tf.placeholder(dtype=tf.int32) # [N]
analogy_b = tf.placeholder(dtype=tf.int32) # [N]
analogy_c = tf.placeholder(dtype=tf.int32) # [N]
# Normalized word embeddings of shape [vocab_size, emb_dim].
nemb = tf.nn.l2_normalize(self._emb, 1)
self._nemb = nemb
# Each row of a_emb, b_emb, c_emb is a word's embedding vector.
# They all have the shape [N, emb_dim]
a_emb = tf.gather(nemb, analogy_a) # a's embs
b_emb = tf.gather(nemb, analogy_b) # b's embs
c_emb = tf.gather(nemb, analogy_c) # c's embs
# We expect that d's embedding vectors on the unit hyper-sphere is
# near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
target = c_emb + (b_emb - a_emb)
# Compute cosine distance between each pair of target and vocab.
# dist has shape [N, vocab_size].
dist = tf.matmul(target, nemb, transpose_b=True)
# For each question (row in dist), find the top 4 words.
_, pred_idx = tf.nn.top_k(dist, 4)
# Nodes for computing neighbors for a given word according to
# their cosine distance.
nearby_word = tf.placeholder(dtype=tf.int32) # word id
nearby_emb = tf.gather(nemb, nearby_word)
nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
min(1000, self._options.vocab_size))
# Nodes in the construct graph which are used by training and
# evaluation to run/feed/fetch.
self._analogy_a = analogy_a
self._analogy_b = analogy_b
self._analogy_c = analogy_c
self._analogy_pred_idx = pred_idx
self._nearby_word = nearby_word
self._nearby_val = nearby_val
self._nearby_idx = nearby_idx
def build_graph(self):
"""Build the graph for the full model."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, self._epoch, self._words, examples,
labels) = word2vec.skipgram_word2vec(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._examples = examples
self._labels = labels
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
true_logits, sampled_logits = self.forward(examples, labels)
loss = self.nce_loss(true_logits, sampled_logits)
tf.summary.scalar("NCE loss", loss)
self._loss = loss
self.optimize(loss)
# Properly initialize all variables.
tf.global_variables_initializer().run()
self.saver = tf.train.Saver()
def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
for i in xrange(opts.vocab_size):
vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode("utf-8")
f.write("%s %d\n" % (vocab_word,
opts.vocab_counts[i]))
np.savetxt(os.path.join(opts.save_path, "vectors.txt"), self._nemb.eval())
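# Each worker thread runs the train op in a loop until the input pipeline advances to the
# next epoch; train() starts opts.concurrent_steps of these threads so updates are applied
# asynchronously.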
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
_, epoch = self._session.run([self._train, self._epoch])
if epoch != initial_epoch:
break
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(opts.save_path, self._session.graph)
workers = []
for _ in xrange(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time, last_summary_time = initial_words, time.time(), 0
last_checkpoint_time = 0
while True:
time.sleep(opts.statistics_interval) # Reports our progress once a while.
(epoch, step, loss, words, lr) = self._session.run(
[self._epoch, self.global_step, self._loss, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r" %
(epoch, step, lr, loss, rate), end="")
sys.stdout.flush()
if now - last_summary_time > opts.summary_interval:
summary_str = self._session.run(summary_op)
summary_writer.add_summary(summary_str, step)
last_summary_time = now
if now - last_checkpoint_time > opts.checkpoint_interval:
self.saver.save(self._session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=step.astype(int))
last_checkpoint_time = now
if epoch != initial_epoch:
break
for t in workers:
t.join()
return epoch
def _predict(self, analogy):
"""Predict the top 4 answers for analogy questions."""
idx, = self._session.run([self._analogy_pred_idx], {
self._analogy_a: analogy[:, 0],
self._analogy_b: analogy[:, 1],
self._analogy_c: analogy[:, 2]
})
return idx
def eval(self):
"""Evaluate analogy questions and reports accuracy."""
# How many questions we get right at precision@1.
correct = 0
try:
total = self._analogy_questions.shape[0]
except AttributeError as e:
raise AttributeError("Need to read analogy questions.")
start = 0
while start < total:
limit = start + 2500
sub = self._analogy_questions[start:limit, :]
idx = self._predict(sub)
start = limit
for question in xrange(sub.shape[0]):
for j in xrange(4):
if idx[question, j] == sub[question, 3]:
# Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
correct += 1
break
elif idx[question, j] in sub[question, :3]:
# We need to skip words already in the question.
continue
else:
# The correct label is not the precision@1
break
print()
print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
correct * 100.0 / total))
def analogy(self, w0, w1, w2):
"""Predict word w3 as in w0:w1 vs w2:w3."""
wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
idx = self._predict(wid)
for c in [self._id2word[i] for i in idx[0, :]]:
if c not in [w0, w1, w2]:
print(c)
return
print("unknown")
def nearby(self, words, num=20):
"""Prints out nearby words given a list of words."""
ids = np.array([self._word2id.get(x, 0) for x in words])
vals, idx = self._session.run(
[self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
for i in xrange(len(words)):
print("\n%s\n=====================================" % (words[i]))
for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
"""Train a word2vec model."""
if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
print("--train_data --eval_data and --save_path must be specified.")
sys.exit(1)
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
model = Word2Vec(opts, session)
model.read_analogies() # Read analogy questions
for _ in xrange(opts.epochs_to_train):
model.train() # Process one epoch
model.eval() # Eval analogies.
# Perform a final save.
model.saver.save(session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=model.global_step)
if FLAGS.interactive:
# E.g.,
# [0]: model.analogy(b'france', b'paris', b'russia')
# [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
_start_shell(locals())
if __name__ == "__main__":
tf.app.run()
|
test_local.py
|
import asyncio
import copy
import math
import operator
import sys
import time
from functools import partial
from threading import Thread
import pytest
from werkzeug import local
if sys.version_info < (3, 7):
def run_async(coro):
return asyncio.get_event_loop().run_until_complete(coro)
else:
def run_async(coro):
return asyncio.run(coro)
def test_basic_local():
ns = local.Local()
ns.foo = 0
values = []
def value_setter(idx):
time.sleep(0.01 * idx)
ns.foo = idx
time.sleep(0.02)
values.append(ns.foo)
threads = [Thread(target=value_setter, args=(x, )) for x in [1, 2, 3]]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert sorted(values) == [1, 2, 3]
def delfoo():
del ns.foo
delfoo()
pytest.raises(AttributeError, lambda: ns.foo)
pytest.raises(AttributeError, delfoo)
local.release_local(ns)
@pytest.mark.skipif(
sys.version_info < (3, 7),
reason="Locals are not task local in Python 3.6",
)
def test_basic_local_asyncio():
ns = local.Local()
ns.foo = 0
values = []
async def value_setter(idx):
await asyncio.sleep(0.01 * idx)
ns.foo = idx
await asyncio.sleep(0.02)
values.append(ns.foo)
async def main():
futures = [asyncio.ensure_future(value_setter(i)) for i in [1, 2, 3]]
await asyncio.gather(*futures)
run_async(main())
assert sorted(values) == [1, 2, 3]
def delfoo():
del ns.foo
delfoo()
pytest.raises(AttributeError, lambda: ns.foo)
pytest.raises(AttributeError, delfoo)
local.release_local(ns)
def test_local_release():
ns = local.Local()
ns.foo = 42
local.release_local(ns)
assert not hasattr(ns, "foo")
ls = local.LocalStack()
ls.push(42)
local.release_local(ls)
assert ls.top is None
def test_local_stack():
ls = local.LocalStack()
assert ls.top is None
ls.push(42)
assert ls.top == 42
ls.push(23)
assert ls.top == 23
ls.pop()
assert ls.top == 42
ls.pop()
assert ls.top is None
assert ls.pop() is None
assert ls.pop() is None
proxy = ls()
ls.push([1, 2])
assert proxy == [1, 2]
ls.push((1, 2))
assert proxy == (1, 2)
ls.pop()
ls.pop()
assert repr(proxy) == "<LocalProxy unbound>"
@pytest.mark.skipif(
sys.version_info < (3, 7),
reason="Locals are not task local in Python 3.6",
)
def test_local_stack_asyncio():
ls = local.LocalStack()
ls.push(1)
async def task():
ls.push(1)
assert len(ls._local.stack) == 2
async def main():
futures = [asyncio.ensure_future(task()) for _ in range(3)]
await asyncio.gather(*futures)
run_async(main())
@pytest.mark.skipif(
sys.version_info >= (3, 7),
reason="Custom ident_func is not supported on Python 3.7 or higher",
)
def test_custom_idents():
ident = 0
ns = local.Local()
stack = local.LocalStack()
local.LocalManager([ns, stack], ident_func=lambda: ident)
ns.foo = 42
stack.push({"foo": 42})
ident = 1
ns.foo = 23
stack.push({"foo": 23})
ident = 0
assert ns.foo == 42
assert stack.top["foo"] == 42
stack.pop()
assert stack.top is None
ident = 1
assert ns.foo == 23
assert stack.top["foo"] == 23
stack.pop()
assert stack.top is None
def test_proxy_local():
ns = local.Local()
ns.foo = []
p = local.LocalProxy(ns, "foo")
p.append(42)
p.append(23)
p[1:] = [1, 2, 3]
assert p == [42, 1, 2, 3]
assert p == ns.foo
ns.foo += [1]
assert list(p) == [42, 1, 2, 3, 1]
p_from_local = ns("foo")
p_from_local.append(2)
assert p == p_from_local
assert p._get_current_object() is ns.foo
def test_proxy_callable():
value = 42
p = local.LocalProxy(lambda: value)
assert p == 42
value = [23]
p.append(42)
assert p == [23, 42]
assert value == [23, 42]
assert p._get_current_object() is value
def test_proxy_wrapped():
class SomeClassWithWrapped:
__wrapped__ = "wrapped"
def lookup_func():
return 42
proxy = local.LocalProxy(lookup_func)
assert proxy.__wrapped__ is lookup_func
partial_lookup_func = partial(lookup_func)
partial_proxy = local.LocalProxy(partial_lookup_func)
assert partial_proxy.__wrapped__ == partial_lookup_func
ns = local.Local()
ns.foo = SomeClassWithWrapped()
ns.bar = 42
assert ns("foo").__wrapped__ == "wrapped"
pytest.raises(AttributeError, lambda: ns("bar").__wrapped__)
def test_proxy_doc():
def example():
"""example doc"""
assert local.LocalProxy(lambda: example).__doc__ == "example doc"
# The __doc__ descriptor shouldn't block the LocalProxy's class doc.
assert local.LocalProxy.__doc__.startswith("A proxy")
def test_proxy_fallback():
def _raises():
raise RuntimeError()
local_proxy = local.LocalProxy(_raises)
assert repr(local_proxy) == "<LocalProxy unbound>"
assert isinstance(local_proxy, local.LocalProxy)
assert not isinstance(local_proxy, Thread)
def test_proxy_unbound():
ns = local.Local()
p = ns("value")
assert repr(p) == "<LocalProxy unbound>"
assert not p
assert dir(p) == []
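# Helper: store a value on a fresh Local and return both the namespace and a LocalProxy
# bound to its 'value' attribute.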
def _make_proxy(value):
ns = local.Local()
ns.value = value
p = ns("value")
return ns, p
def test_proxy_type():
_, p = _make_proxy([])
assert isinstance(p, list)
assert p.__class__ is list
assert issubclass(type(p), local.LocalProxy)
assert type(p) is local.LocalProxy
def test_proxy_string_representations():
class Example:
def __repr__(self):
return "a"
def __bytes__(self):
return b"b"
def __index__(self):
return 23
_, p = _make_proxy(Example())
assert str(p) == "a"
assert repr(p) == "a"
assert bytes(p) == b"b"
# __index__
assert bin(p) == "0b10111"
assert oct(p) == "0o27"
assert hex(p) == "0x17"
def test_proxy_hash():
ns, p = _make_proxy("abc")
assert hash(ns.value) == hash(p)
@pytest.mark.parametrize(
"op",
[
operator.lt,
operator.le,
operator.eq,
operator.ne,
operator.gt,
operator.ge,
operator.add,
operator.sub,
operator.mul,
operator.truediv,
operator.floordiv,
operator.mod,
divmod,
pow,
operator.lshift,
operator.rshift,
operator.and_,
operator.or_,
operator.xor,
],
)
def test_proxy_binop_int(op):
_, p = _make_proxy(2)
assert op(p, 3) == op(2, 3)
# r-op
assert op(3, p) == op(3, 2)
@pytest.mark.parametrize("op",
[operator.neg, operator.pos, abs, operator.invert])
def test_proxy_uop_int(op):
_, p = _make_proxy(-2)
assert op(p) == op(-2)
def test_proxy_numeric():
class Example:
def __complex__(self):
return 1 + 2j
def __int__(self):
return 1
def __float__(self):
return 2.1
def __round__(self, n=None):
if n is not None:
return 3.3
return 3
def __trunc__(self):
return 4
def __floor__(self):
return 5
def __ceil__(self):
return 6
def __index__(self):
return 2
_, p = _make_proxy(Example())
assert complex(p) == 1 + 2j
assert int(p) == 1
assert float(p) == 2.1
assert round(p) == 3
assert round(p, 2) == 3.3
assert math.trunc(p) == 4
assert math.floor(p) == 5
assert math.ceil(p) == 6
assert [1, 2, 3][p] == 3 # __index__
@pytest.mark.parametrize(
"op",
[
operator.iadd,
operator.isub,
operator.imul,
operator.imatmul,
operator.itruediv,
operator.ifloordiv,
operator.imod,
operator.ipow,
operator.ilshift,
operator.irshift,
operator.iand,
operator.ior,
operator.ixor,
],
)
def test_proxy_iop(op):
class Example:
value = 1
def fake_op(self, other):
self.value = other
return self
__iadd__ = fake_op
__isub__ = fake_op
__imul__ = fake_op
__imatmul__ = fake_op
__itruediv__ = fake_op
__ifloordiv__ = fake_op
__imod__ = fake_op
__ipow__ = fake_op
__ilshift__ = fake_op
__irshift__ = fake_op
__iand__ = fake_op
__ior__ = fake_op
__ixor__ = fake_op
ns, p = _make_proxy(Example())
p_out = op(p, 2)
assert type(p_out) is local.LocalProxy
assert p.value == 2
assert ns.value.value == 2
def test_proxy_matmul():
class Example:
def __matmul__(self, other):
return 2 * other
def __rmatmul__(self, other):
return 2 * other
_, p = _make_proxy(Example())
assert p @ 3 == 6
assert 4 @ p == 8
def test_proxy_str():
_, p = _make_proxy("{act} %s")
assert p + " world" == "{act} %s world"
assert "say " + p == "say {act} %s"
assert p * 2 == "{act} %s{act} %s"
assert 2 * p == p * 2
assert p % ("world", ) == "{act} world"
assert p.format(act="test") == "test %s"
def test_proxy_list():
_, p = _make_proxy([1, 2, 3])
assert len(p) == 3
assert p[0] == 1
assert 3 in p
assert 4 not in p
assert tuple(p) == (1, 2, 3)
assert list(reversed(p)) == [3, 2, 1]
p[0] = 4
assert p == [4, 2, 3]
del p[-1]
assert p == [4, 2]
p += [5]
assert p[-1] == 5
p *= 2
assert len(p) == 6
p[:] = []
assert not p
p.append(1)
assert p
assert p + [2] == [1, 2]
assert [2] + p == [2, 1]
def test_proxy_copy():
class Foo:
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
ns, p = _make_proxy(Foo())
assert copy.copy(p) is ns.value
assert copy.deepcopy(p) is ns.value
a = []
_, p = _make_proxy([a])
assert copy.copy(p) == [a]
assert copy.copy(p)[0] is a
assert copy.deepcopy(p) == [a]
assert copy.deepcopy(p)[0] is not a
def test_proxy_iterator():
a = [1, 2, 3]
_, p = _make_proxy(iter(a))
assert next(p) == 1
def test_proxy_length_hint():
class Example:
def __length_hint__(self):
return 2
_, p = _make_proxy(Example())
assert operator.length_hint(p) == 2
def test_proxy_context_manager():
class Example:
value = 2
def __enter__(self):
self.value += 1
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.value -= 1
_, p = _make_proxy(Example())
assert p.value == 2
with p:
assert p.value == 3
assert p.value == 2
def test_proxy_class():
class Meta(type):
def __instancecheck__(cls, instance):
return True
def __subclasscheck__(cls, subclass):
return True
class Parent:
pass
class Example(Parent, metaclass=Meta):
pass
class Child(Example):
pass
_, p = _make_proxy(Example)
assert type(p()) is Example
assert isinstance(1, p)
assert issubclass(int, p)
assert p.__mro__ == (Example, Parent, object)
assert p.__bases__ == (Parent, )
assert p.__subclasses__() == [Child]
def test_proxy_attributes():
class Example:
def __init__(self):
object.__setattr__(self, "values", {})
def __getattribute__(self, name):
if name == "ham":
return "eggs"
return super().__getattribute__(name)
def __getattr__(self, name):
return self.values.get(name)
def __setattr__(self, name, value):
self.values[name] = value
def __delattr__(self, name):
del self.values[name]
def __dir__(self):
return sorted(self.values.keys())
_, p = _make_proxy(Example())
assert p.nothing is None
assert p.__dict__ == {"values": {}}
assert dir(p) == []
p.x = 1
assert p.x == 1
assert dir(p) == ["x"]
del p.x
assert dir(p) == []
assert p.ham == "eggs"
p.ham = "spam"
assert p.ham == "eggs"
assert p.values["ham"] == "spam"
def test_proxy_await():
async def get():
return 1
_, p = _make_proxy(get())
async def main():
return await p
out = run_async(main())
assert out == 1
def test_proxy_aiter():
class Example:
value = 3
def __aiter__(self):
return self
async def __anext__(self):
if self.value:
self.value -= 1
return self.value
raise StopAsyncIteration
_, p = _make_proxy(Example())
async def main():
out = []
async for v in p:
out.append(v)
return out
out = run_async(main())
assert out == [2, 1, 0]
def test_proxy_async_context_manager():
class Example:
value = 2
async def __aenter__(self):
self.value += 1
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
self.value -= 1
_, p = _make_proxy(Example())
async def main():
async with p:
assert p.value == 3
assert p.value == 2
return True
assert run_async(main())
|
repository.py
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import functools
import os
import re
import shutil
import subprocess
from argparse import ArgumentParser, _SubParsersAction
from contextlib import contextmanager
from textwrap import dedent
from threading import Thread
from pex import dist_metadata, pex_warnings
from pex.commands.command import Error, JsonMixin, Ok, OutputMixin, Result
from pex.common import (
DETERMINISTIC_DATETIME_TIMESTAMP,
pluralize,
safe_mkdir,
safe_mkdtemp,
safe_open,
)
from pex.compatibility import Queue
from pex.environment import PEXEnvironment
from pex.interpreter import PythonIdentity, PythonInterpreter, spawn_python_job
from pex.jobs import Retain, SpawnedJob, execute_parallel
from pex.pex import PEX
from pex.third_party.pkg_resources import Distribution
from pex.tools.command import PEXCommand
from pex.typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
import attr # vendor:skip
from typing import Callable, IO, Iterable, Iterator, Text, Tuple
RepositoryFunc = Callable[["Repository", PEX], Result]
else:
from pex.third_party import attr
@attr.s(frozen=True)
class FindLinksRepo(object):
@classmethod
def serve(
cls,
interpreter, # type: PythonInterpreter
port, # type: int
directory, # type: str
):
# type: (...) -> FindLinksRepo
http_server_module = "SimpleHTTPServer" if interpreter.version[0] == 2 else "http.server"
cmd, http_server_process = interpreter.open_process(
# N.B.: Running Python in unbuffered mode here is critical to being able to read stdout.
args=["-u", "-m", http_server_module, str(port)],
cwd=directory,
stdout=subprocess.PIPE,
)
real_port = Queue() # type: Queue[int]
def read_data():
try:
data = http_server_process.stdout.readline()
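# The stdlib HTTP server announces itself with a line like
# 'Serving HTTP on 0.0.0.0 port 8000 ...'; parse the bound port from that line
# (needed when port 0 lets the OS choose a free port).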
match = re.match(br"^Serving HTTP on [^\s]+ port (?P<port>\d+)[^\d]", data)
real_port.put(int(match.group("port")))
finally:
real_port.task_done()
reader = Thread(target=read_data)
reader.daemon = True
reader.start()
real_port.join()
reader.join()
return cls(cmd=cmd, port=real_port.get(), server_process=http_server_process)
cmd = attr.ib() # type: Iterable[str]
port = attr.ib() # type: int
_server_process = attr.ib() # type: subprocess.Popen
@property
def pid(self):
# type: () -> int
return self._server_process.pid
def join(self):
# type: () -> int
return self._server_process.wait()
def kill(self):
# type: () -> None
self._server_process.kill()
class Repository(JsonMixin, OutputMixin, PEXCommand):
"""Interact with the Python distribution repository contained in a PEX file."""
@classmethod
def _add_info_arguments(cls, subparsers):
# type: (_SubParsersAction) -> ArgumentParser
info_parser = subparsers.add_parser(
name="info", help="Print information about the distributions in a PEX file."
)
info_parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="Print the distributions requirements in addition to its name version and path.",
)
cls.add_json_options(info_parser, entity="verbose output")
cls.register_global_arguments(info_parser, include_verbosity=False)
return info_parser
@classmethod
def _add_extract_arguments(cls, subparsers):
# type: (_SubParsersAction) -> ArgumentParser
extract_parser = subparsers.add_parser(
name="extract", help="Extract all distributions from a PEX file."
)
extract_parser.add_argument(
"-f",
"--dest-dir",
"--find-links",
"--repo",
metavar="PATH",
help="The path to extract distribution as wheels to.",
)
extract_parser.add_argument(
"-D",
"--sources",
action="store_true",
help="Also extract a wheel for the PEX file sources.",
)
extract_parser.add_argument(
"--use-system-time",
dest="use_system_time",
default=False,
action="store_true",
help=(
"Use the current system time to generate timestamps for the extracted "
"distributions. Otherwise, Pex will use midnight on January 1, 1980. By using "
"system time, the extracted distributions will not be reproducible, meaning that "
"if you were to re-run extraction against the same PEX file then the newly "
"extracted distributions would not be byte-for-byte identical distributions "
"extracted in prior runs."
),
)
extract_parser.add_argument(
"--serve",
action="store_true",
help="Serve the --find-links repo.",
)
extract_parser.add_argument(
"--port",
type=int,
default=0,
metavar="PORT",
help="The port to serve the --find-links repo on.",
)
extract_parser.add_argument(
"--pid-file",
metavar="PATH",
help="The path of a file to write the <pid>:<port> of the find links server to.",
)
cls.register_global_arguments(extract_parser)
return extract_parser
@classmethod
def add_arguments(cls, parser):
# type: (ArgumentParser) -> None
cls.add_output_option(parser, entity="distribution information")
parser.set_defaults(repository_func=functools.partial(cls.show_help, parser))
subparsers = parser.add_subparsers(
description=(
"A PEX distribution repository can be operated on using any of the following "
"subcommands."
)
)
cls._add_info_arguments(subparsers).set_defaults(repository_func=cls._info)
cls._add_extract_arguments(subparsers).set_defaults(repository_func=cls._extract)
def run(self, pex):
# type: (PEX) -> Result
repository_func = cast("RepositoryFunc", self.options.repository_func)
return repository_func(self, pex)
@contextmanager
def _distributions_output(self, pex):
# type: (PEX) -> Iterator[Tuple[Iterable[Distribution], IO]]
with self.output(self.options) as out:
yield tuple(pex.resolve()), out
def _info(self, pex):
# type: (PEX) -> Result
with self._distributions_output(pex) as (distributions, output):
for distribution in distributions:
if self.options.verbose:
requires_python = dist_metadata.requires_python(distribution)
requires_dists = list(dist_metadata.requires_dists(distribution))
self.dump_json(
self.options,
dict(
project_name=distribution.project_name,
version=distribution.version,
requires_python=str(requires_python) if requires_python else None,
requires_dists=[str(dist) for dist in requires_dists],
location=distribution.location,
),
output,
)
else:
output.write(
"{project_name} {version} {location}".format(
project_name=distribution.project_name,
version=distribution.version,
location=distribution.location,
)
)
output.write("\n")
return Ok()
def _extract(self, pex):
# type: (PEX) -> Result
if not self.options.serve and not self.options.dest_dir:
return Error("Specify a --find-links directory to extract wheels to.")
dest_dir = (
os.path.abspath(os.path.expanduser(self.options.dest_dir))
if self.options.dest_dir
else safe_mkdtemp()
)
safe_mkdir(dest_dir)
if self.options.sources:
self._extract_sdist(pex, dest_dir)
def spawn_extract(distribution):
# type: (Distribution) -> SpawnedJob[Text]
env = os.environ.copy()
if not self.options.use_system_time:
# N.B.: The `SOURCE_DATE_EPOCH` env var is semi-standard magic for controlling
# build tools. Wheel has supported this since 2016.
# See:
# + https://reproducible-builds.org/docs/source-date-epoch/
# + https://github.com/pypa/wheel/blob/1b879e53fed1f179897ed47e55a68bc51df188db/wheel/archive.py#L36-L39
env.update(SOURCE_DATE_EPOCH=str(int(DETERMINISTIC_DATETIME_TIMESTAMP)))
job = spawn_python_job(
args=["-m", "wheel", "pack", "--dest-dir", dest_dir, distribution.location],
interpreter=pex.interpreter,
expose=["wheel"],
stdout=subprocess.PIPE,
env=env,
)
return SpawnedJob.stdout(
job, result_func=lambda out: "{}: {}".format(distribution, out.decode())
)
with self._distributions_output(pex) as (distributions, output):
errors = []
for result in execute_parallel(distributions, spawn_extract, error_handler=Retain()):
if isinstance(result, tuple):
distribution, error = result
errors.append(distribution)
output.write(
"Failed to build a wheel for {distribution}: {error}\n".format(
distribution=distribution, error=error
)
)
else:
output.write(result)
if errors:
return Error(
"Failed to build wheels for {count} {distributions}.".format(
count=len(errors), distributions=pluralize(errors, "distribution")
)
)
if not self.options.serve:
return Ok()
repo = FindLinksRepo.serve(
interpreter=pex.interpreter, port=self.options.port, directory=dest_dir
)
output.write(
"Serving find-links repo of {pex} via {find_links} at http://localhost:{port}\n".format(
pex=os.path.normpath(pex.path()), find_links=dest_dir, port=repo.port
)
)
if self.options.pid_file:
with safe_open(self.options.pid_file, "w") as fp:
fp.write("{}:{}".format(repo.pid, repo.port))
try:
return Result(exit_code=repo.join(), message=" ".join(repo.cmd))
except KeyboardInterrupt:
repo.kill()
return Ok("Shut down server for find links repo at {}.".format(dest_dir))
@staticmethod
def _extract_sdist(
pex, # type: PEX
dest_dir, # type: str
):
# type: (...) -> None
pex_info = pex.pex_info()
chroot = safe_mkdtemp()
pex_path = pex.path()
src = os.path.join(chroot, "src")
excludes = ["__main__.py", pex_info.PATH, pex_info.bootstrap, pex_info.internal_cache]
shutil.copytree(
PEXEnvironment.mount(pex_path).path, src, ignore=lambda _dir, _names: excludes
)
name, _ = os.path.splitext(os.path.basename(pex_path))
version = "0.0.0+{}".format(pex_info.code_hash)
zip_safe = False # Since PEX files never require code to be zip safe, assume it isn't.
py_modules = [os.path.splitext(f)[0] for f in os.listdir(src) if f.endswith(".py")]
packages = [
os.path.relpath(os.path.join(root, d), src).replace(os.sep, ".")
for root, dirs, _ in os.walk(src)
for d in dirs
]
install_requires = [str(req) for req in pex_info.requirements]
python_requires = None
if len(pex_info.interpreter_constraints) == 1:
python_requires = str(
PythonIdentity.parse_requirement(pex_info.interpreter_constraints[0]).specifier
)
elif pex_info.interpreter_constraints:
pex_warnings.warn(
"Omitting `python_requires` for {name} sdist since {pex} has multiple "
"interpreter constraints:\n{interpreter_constraints}".format(
name=name,
pex=os.path.normpath(pex_path),
interpreter_constraints="\n".join(
"{index}.) {constraint}".format(index=index, constraint=constraint)
for index, constraint in enumerate(
pex_info.interpreter_constraints, start=1
)
),
)
)
entry_points = []
if pex_info.entry_point and ":" in pex_info.entry_point:
entry_points = [(name, pex_info.entry_point)]
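# Synthesize a minimal setuptools project (setup.cfg, MANIFEST.in and setup.py) describing
# the PEX sources, then build an sdist from the chroot into dest_dir.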
with open(os.path.join(chroot, "setup.cfg"), "w") as fp:
fp.write(
dedent(
"""\
[metadata]
name = {name}
version = {version}
[options]
zip_safe = {zip_safe}
{py_modules}
{packages}
package_dir =
=src
include_package_data = True
{python_requires}
{install_requires}
[options.entry_points]
{entry_points}
"""
).format(
name=name,
version=version,
zip_safe=zip_safe,
py_modules=(
"py_modules =\n {}".format("\n ".join(py_modules)) if py_modules else ""
),
packages=(
"packages = \n {}".format("\n ".join(packages)) if packages else ""
),
install_requires=(
"install_requires =\n {}".format("\n ".join(install_requires))
if install_requires
else ""
),
python_requires=(
"python_requires = {}".format(python_requires) if python_requires else ""
),
entry_points=(
"console_scripts =\n {}".format(
"\n ".join(
"{} = {}".format(name, entry_point)
for name, entry_point in entry_points
)
)
if entry_points
else ""
),
)
)
with open(os.path.join(chroot, "MANIFEST.in"), "w") as fp:
fp.write("recursive-include src *")
with open(os.path.join(chroot, "setup.py"), "w") as fp:
fp.write("import setuptools; setuptools.setup()")
spawn_python_job(
args=["setup.py", "sdist", "--dist-dir", dest_dir],
interpreter=pex.interpreter,
expose=["setuptools"],
cwd=chroot,
).wait()
|
main.py
|
from quixstreaming import QuixStreamingClient
from quixstreaming.app import App
from quix_functions import QuixFunctions
import requests
import time
import traceback
from threading import Thread
import os
try:
# should the main loop run?
run = True
# Quix injects credentials automatically to the client.
# Alternatively, you can always pass an SDK token manually as an argument.
client = QuixStreamingClient()
# Open the output topic where to write data out
print("Opening output topic")
output_topic = client.open_output_topic(os.environ["output"])
# Which currency pairs are you interested in?
primary_currency = os.environ["primary_currency"] # e.g."BTC"
secondary_currencies = os.environ["secondary_currencies"] # e.g."USD,GBP"
url = 'https://rest.coinapi.io/v1/exchangerate/{0}?filter_asset_id={1}'.format(primary_currency, secondary_currencies)
# COIN API Key
coin_api_key = "{}".format(os.environ["coin_api_key"])
if coin_api_key == '':
raise ValueError('Please update coin_api_key env var with your COIN API Key')
headers = {'X-CoinAPI-Key': coin_api_key}
output_stream = output_topic.create_stream("coin-api")
# Give the stream a human-readable name. This name will appear in the data catalogue.
output_stream.properties.name = "Coin API"
# Save the stream in a specific folder in the data catalogue to help organize your workspace.
output_stream.properties.location = "/Coin API"
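# Poll the CoinAPI exchange-rate endpoint in a loop and write each batch of rates to the
# Quix output stream until shutdown is requested.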
def get_data():
global run
quix_functions = QuixFunctions(output_stream)
while run:
response = requests.get(url, headers=headers)
data = response.json()
rows = data['rates']
quix_functions.data_handler(rows, primary_currency)
# We sleep for 15 minutes so we don't hit the free COIN API account limit.
# Stop sleeping early if process termination is requested.
sleeping = 0
while sleeping <= 900 and run:
sleeping = sleeping + 1
time.sleep(1)
def before_shutdown():
global run
# Stop the main loop
run = False
def main():
thread = Thread(target=get_data)
thread.start()
print("CONNECTED!")
App.run(before_shutdown=before_shutdown)
# wait for worker thread to end
thread.join()
print("Exiting")
if __name__ == "__main__":
main()
except Exception:
print("ERROR: {}".format(traceback.format_exc()))
|
lro_track_2.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: /data2/captures/20200329/LRO_RHCP_2020-04-01T22:40:27Z
# GNU Radio version: 3.7.13.5
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from datetime import datetime as dt; import string; import math
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import fft
from gnuradio import filter
from gnuradio import gr
from gnuradio import qtgui
from gnuradio import uhd
from gnuradio.eng_option import eng_option
from gnuradio.fft import window
from gnuradio.filter import firdes
from optparse import OptionParser
import sip
import sys
import threading
import time
from gnuradio import qtgui
class lro_track_2(gr.top_block, Qt.QWidget):
def __init__(self, avg_len=256, nfft=2048, path="/data2/captures/20200329", record_hz=10, rx_alt=542, rx_lat=37.148745, rx_lon=-80.578557, signal_type='LRO', usrp_type='B210'):
gr.top_block.__init__(self, "/data2/captures/20200329/LRO_RHCP_2020-04-01T22:40:27Z")
Qt.QWidget.__init__(self)
self.setWindowTitle("/data2/captures/20200329/LRO_RHCP_2020-04-01T22:40:27Z")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "lro_track_2")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Parameters
##################################################
self.avg_len = avg_len
self.nfft = nfft
self.path = path
self.record_hz = record_hz
self.rx_alt = rx_alt
self.rx_lat = rx_lat
self.rx_lon = rx_lon
self.signal_type = signal_type
self.usrp_type = usrp_type
##################################################
# Variables
##################################################
self.ts_str = ts_str = dt.strftime(dt.utcnow(), "%Y-%m-%dT%H:%M:%SZ")
self.rhcp_probe_snr_func = rhcp_probe_snr_func = 0
self.rhcp_probe_signal_func = rhcp_probe_signal_func = 0
self.rhcp_probe_offset_func = rhcp_probe_offset_func = 0
self.rhcp_probe_noise_func = rhcp_probe_noise_func = 0
self.lhcp_probe_snr_func = lhcp_probe_snr_func = 0
self.lhcp_probe_signal_func = lhcp_probe_signal_func = 0
self.lhcp_probe_offset_func = lhcp_probe_offset_func = 0
self.lhcp_probe_noise_func = lhcp_probe_noise_func = 0
self.samp_rate = samp_rate = 4e6
self.rhcp_snr_var = rhcp_snr_var = "{:3.3f}".format(rhcp_probe_snr_func)
self.rhcp_signal_var = rhcp_signal_var = "{:3.3f}".format(rhcp_probe_signal_func)
self.rhcp_offset_var = rhcp_offset_var = "{:3.1f}".format(rhcp_probe_offset_func)
self.rhcp_noise_var = rhcp_noise_var = "{:3.3f}".format(rhcp_probe_noise_func)
self.lhcp_snr_var = lhcp_snr_var = "{:3.3f}".format(lhcp_probe_snr_func)
self.lhcp_signal_var = lhcp_signal_var = "{:3.3f}".format(lhcp_probe_signal_func)
self.lhcp_offset_var = lhcp_offset_var = "{:3.1f}".format(lhcp_probe_offset_func)
self.lhcp_noise_var = lhcp_noise_var = "{:3.3f}".format(lhcp_probe_noise_func)
self.fn_rhcp = fn_rhcp = "{:s}_RHCP_{:s}".format(signal_type.upper(), ts_str)
self.fn_lhcp = fn_lhcp = "{:s}_LHCP_{:s}".format(signal_type.upper(), ts_str)
self.rx_gain = rx_gain = 20
self.rx_freq = rx_freq = 2271.2e6
self.rhcp_snr_label = rhcp_snr_label = rhcp_snr_var
self.rhcp_signal_label = rhcp_signal_label = rhcp_signal_var
self.rhcp_offset_label = rhcp_offset_label = rhcp_offset_var
self.rhcp_noise_label = rhcp_noise_label = rhcp_noise_var
self.lhcp_signal_label = lhcp_signal_label = lhcp_signal_var
self.lhcp_offset_label = lhcp_offset_label = lhcp_offset_var
self.lhcp_noise_label = lhcp_noise_label = lhcp_noise_var
self.lchp_snr_label = lchp_snr_label = lhcp_snr_var
self.keep_n = keep_n = samp_rate/record_hz
self.fp_rhcp = fp_rhcp = "{:s}/{:s}".format(path, fn_rhcp)
self.fp_lhcp = fp_lhcp = "{:s}/{:s}".format(path, fn_lhcp)
self.fft_min = fft_min = -115
self.fft_max = fft_max = -75
self.decim_0 = decim_0 = 8
self.decim = decim = 8
self.alpha = alpha = 1.0/(samp_rate/record_hz)
##################################################
# Blocks
##################################################
self.rhcp_probe_offset = blocks.probe_signal_f()
self._samp_rate_tool_bar = Qt.QToolBar(self)
self._samp_rate_tool_bar.addWidget(Qt.QLabel('SAMP_RATE'+": "))
self._samp_rate_line_edit = Qt.QLineEdit(str(self.samp_rate))
self._samp_rate_tool_bar.addWidget(self._samp_rate_line_edit)
self._samp_rate_line_edit.returnPressed.connect(
lambda: self.set_samp_rate(eng_notation.str_to_num(str(self._samp_rate_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._samp_rate_tool_bar, 0, 0, 1, 1)
for r in range(0, 1):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 1):
self.top_grid_layout.setColumnStretch(c, 1)
self._rx_gain_tool_bar = Qt.QToolBar(self)
self._rx_gain_tool_bar.addWidget(Qt.QLabel('GAIN'+": "))
self._rx_gain_line_edit = Qt.QLineEdit(str(self.rx_gain))
self._rx_gain_tool_bar.addWidget(self._rx_gain_line_edit)
self._rx_gain_line_edit.returnPressed.connect(
lambda: self.set_rx_gain(eng_notation.str_to_num(str(self._rx_gain_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._rx_gain_tool_bar, 0, 2, 1, 1)
for r in range(0, 1):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(2, 3):
self.top_grid_layout.setColumnStretch(c, 1)
self._rx_freq_tool_bar = Qt.QToolBar(self)
self._rx_freq_tool_bar.addWidget(Qt.QLabel('FREQ'+": "))
self._rx_freq_line_edit = Qt.QLineEdit(str(self.rx_freq))
self._rx_freq_tool_bar.addWidget(self._rx_freq_line_edit)
self._rx_freq_line_edit.returnPressed.connect(
lambda: self.set_rx_freq(eng_notation.str_to_num(str(self._rx_freq_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._rx_freq_tool_bar, 0, 1, 1, 1)
for r in range(0, 1):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(1, 2):
self.top_grid_layout.setColumnStretch(c, 1)
self.rhcp_probe_snr = blocks.probe_signal_f()
self.rhcp_probe_signal = blocks.probe_signal_f()
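# GRC-generated polling pattern: a daemon thread samples the probe_signal block at 10 Hz
# and pushes each reading into the corresponding flow-graph variable via its setter.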
def _rhcp_probe_offset_func_probe():
while True:
val = self.rhcp_probe_offset.level()
try:
self.set_rhcp_probe_offset_func(val)
except AttributeError:
pass
time.sleep(1.0 / (10))
_rhcp_probe_offset_func_thread = threading.Thread(target=_rhcp_probe_offset_func_probe)
_rhcp_probe_offset_func_thread.daemon = True
_rhcp_probe_offset_func_thread.start()
self.rhcp_probe_noise = blocks.probe_signal_f()
self.lhcp_probe_snr = blocks.probe_signal_f()
self.lhcp_probe_signal = blocks.probe_signal_f()
self.lhcp_probe_offset = blocks.probe_signal_f()
self.lhcp_probe_noise = blocks.probe_signal_f()
self._fft_min_tool_bar = Qt.QToolBar(self)
self._fft_min_tool_bar.addWidget(Qt.QLabel('fft_min'+": "))
self._fft_min_line_edit = Qt.QLineEdit(str(self.fft_min))
self._fft_min_tool_bar.addWidget(self._fft_min_line_edit)
self._fft_min_line_edit.returnPressed.connect(
lambda: self.set_fft_min(eng_notation.str_to_num(str(self._fft_min_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._fft_min_tool_bar, 0, 3, 1, 1)
for r in range(0, 1):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(3, 4):
self.top_grid_layout.setColumnStretch(c, 1)
self._fft_max_tool_bar = Qt.QToolBar(self)
self._fft_max_tool_bar.addWidget(Qt.QLabel('fft_max'+": "))
self._fft_max_line_edit = Qt.QLineEdit(str(self.fft_max))
self._fft_max_tool_bar.addWidget(self._fft_max_line_edit)
self._fft_max_line_edit.returnPressed.connect(
lambda: self.set_fft_max(eng_notation.str_to_num(str(self._fft_max_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._fft_max_tool_bar, 0, 4, 1, 1)
for r in range(0, 1):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(4, 5):
self.top_grid_layout.setColumnStretch(c, 1)
self.uhd_usrp_source_1 = uhd.usrp_source(
",".join(("serial=30CF9D2", "")),
uhd.stream_args(
cpu_format="fc32",
channels=range(2),
),
)
self.uhd_usrp_source_1.set_clock_source('external', 0)
self.uhd_usrp_source_1.set_time_source('external', 0)
self.uhd_usrp_source_1.set_samp_rate(samp_rate)
self.uhd_usrp_source_1.set_time_now(uhd.time_spec(time.time()), uhd.ALL_MBOARDS)
self.uhd_usrp_source_1.set_center_freq(uhd.tune_request(rx_freq, samp_rate/2), 0)
self.uhd_usrp_source_1.set_gain(rx_gain, 0)
self.uhd_usrp_source_1.set_antenna('RX2', 0)
self.uhd_usrp_source_1.set_auto_dc_offset(True, 0)
self.uhd_usrp_source_1.set_auto_iq_balance(True, 0)
self.uhd_usrp_source_1.set_center_freq(uhd.tune_request(rx_freq, samp_rate/2), 1)
self.uhd_usrp_source_1.set_gain(rx_gain, 1)
self.uhd_usrp_source_1.set_antenna('RX2', 1)
self.uhd_usrp_source_1.set_auto_dc_offset(True, 1)
self.uhd_usrp_source_1.set_auto_iq_balance(True, 1)
self._rhcp_snr_label_tool_bar = Qt.QToolBar(self)
if None:
self._rhcp_snr_label_formatter = None
else:
self._rhcp_snr_label_formatter = lambda x: str(x)
self._rhcp_snr_label_tool_bar.addWidget(Qt.QLabel('SNR [dB]'+": "))
self._rhcp_snr_label_label = Qt.QLabel(str(self._rhcp_snr_label_formatter(self.rhcp_snr_label)))
self._rhcp_snr_label_tool_bar.addWidget(self._rhcp_snr_label_label)
self.top_grid_layout.addWidget(self._rhcp_snr_label_tool_bar, 9, 3, 1, 1)
for r in range(9, 10):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(3, 4):
self.top_grid_layout.setColumnStretch(c, 1)
self._rhcp_signal_label_tool_bar = Qt.QToolBar(self)
if None:
self._rhcp_signal_label_formatter = None
else:
self._rhcp_signal_label_formatter = lambda x: str(x)
self._rhcp_signal_label_tool_bar.addWidget(Qt.QLabel('Signal [dBFS]'+": "))
self._rhcp_signal_label_label = Qt.QLabel(str(self._rhcp_signal_label_formatter(self.rhcp_signal_label)))
self._rhcp_signal_label_tool_bar.addWidget(self._rhcp_signal_label_label)
self.top_grid_layout.addWidget(self._rhcp_signal_label_tool_bar, 9, 1, 1, 1)
for r in range(9, 10):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(1, 2):
self.top_grid_layout.setColumnStretch(c, 1)
def _rhcp_probe_snr_func_probe():
while True:
val = self.rhcp_probe_snr.level()
try:
self.set_rhcp_probe_snr_func(val)
except AttributeError:
pass
time.sleep(1.0 / (10))
_rhcp_probe_snr_func_thread = threading.Thread(target=_rhcp_probe_snr_func_probe)
_rhcp_probe_snr_func_thread.daemon = True
_rhcp_probe_snr_func_thread.start()
def _rhcp_probe_signal_func_probe():
while True:
val = self.rhcp_probe_signal.level()
try:
self.set_rhcp_probe_signal_func(val)
except AttributeError:
pass
time.sleep(1.0 / (10))
_rhcp_probe_signal_func_thread = threading.Thread(target=_rhcp_probe_signal_func_probe)
_rhcp_probe_signal_func_thread.daemon = True
_rhcp_probe_signal_func_thread.start()
def _rhcp_probe_noise_func_probe():
while True:
val = self.rhcp_probe_noise.level()
try:
self.set_rhcp_probe_noise_func(val)
except AttributeError:
pass
time.sleep(1.0 / (10))
_rhcp_probe_noise_func_thread = threading.Thread(target=_rhcp_probe_noise_func_probe)
_rhcp_probe_noise_func_thread.daemon = True
_rhcp_probe_noise_func_thread.start()
self._rhcp_offset_label_tool_bar = Qt.QToolBar(self)
if None:
self._rhcp_offset_label_formatter = None
else:
self._rhcp_offset_label_formatter = lambda x: str(x)
self._rhcp_offset_label_tool_bar.addWidget(Qt.QLabel('Offset [Hz]'+": "))
self._rhcp_offset_label_label = Qt.QLabel(str(self._rhcp_offset_label_formatter(self.rhcp_offset_label)))
self._rhcp_offset_label_tool_bar.addWidget(self._rhcp_offset_label_label)
self.top_grid_layout.addWidget(self._rhcp_offset_label_tool_bar, 9, 0, 1, 1)
for r in range(9, 10):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 1):
self.top_grid_layout.setColumnStretch(c, 1)
self._rhcp_noise_label_tool_bar = Qt.QToolBar(self)
if None:
self._rhcp_noise_label_formatter = None
else:
self._rhcp_noise_label_formatter = lambda x: str(x)
self._rhcp_noise_label_tool_bar.addWidget(Qt.QLabel('Noise [dBFS]'+": "))
self._rhcp_noise_label_label = Qt.QLabel(str(self._rhcp_noise_label_formatter(self.rhcp_noise_label)))
self._rhcp_noise_label_tool_bar.addWidget(self._rhcp_noise_label_label)
self.top_grid_layout.addWidget(self._rhcp_noise_label_tool_bar, 9, 2, 1, 1)
for r in range(9, 10):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(2, 3):
self.top_grid_layout.setColumnStretch(c, 1)
self.rational_resampler_xxx_0_0 = filter.rational_resampler_ccc(
interpolation=1,
decimation=decim,
taps=None,
fractional_bw=None,
)
self.rational_resampler_xxx_0 = filter.rational_resampler_ccc(
interpolation=1,
decimation=decim,
taps=None,
fractional_bw=None,
)
self.qtgui_waterfall_sink_x_0_0 = qtgui.waterfall_sink_c(
2048, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0_0.set_update_time(0.010)
self.qtgui_waterfall_sink_x_0_0.enable_grid(False)
self.qtgui_waterfall_sink_x_0_0.enable_axis_labels(True)
if not True:
self.qtgui_waterfall_sink_x_0_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_waterfall_sink_x_0_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0_0.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0_0.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0_0.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0_0.set_intensity_range(fft_min, fft_max)
self._qtgui_waterfall_sink_x_0_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_waterfall_sink_x_0_0_win, 5, 4, 4, 4)
for r in range(5, 9):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(4, 8):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_waterfall_sink_x_0 = qtgui.waterfall_sink_c(
2048, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0.set_update_time(0.010)
self.qtgui_waterfall_sink_x_0.enable_grid(False)
self.qtgui_waterfall_sink_x_0.enable_axis_labels(True)
if not True:
self.qtgui_waterfall_sink_x_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_waterfall_sink_x_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0.set_intensity_range(fft_min, fft_max)
self._qtgui_waterfall_sink_x_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_waterfall_sink_x_0_win, 5, 0, 4, 4)
for r in range(5, 9):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 4):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_freq_sink_x_0_0 = qtgui.freq_sink_c(
2048, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"LHCP", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0_0.set_update_time(0.010)
self.qtgui_freq_sink_x_0_0.set_y_axis(fft_min, fft_max)
self.qtgui_freq_sink_x_0_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0_0.enable_grid(True)
self.qtgui_freq_sink_x_0_0.set_fft_average(0.05)
self.qtgui_freq_sink_x_0_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0_0.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_0_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_freq_sink_x_0_0_win, 1, 4, 4, 4)
for r in range(1, 5):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(4, 8):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_freq_sink_x_0 = qtgui.freq_sink_c(
2048, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"RHCP", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0.set_update_time(0.010)
self.qtgui_freq_sink_x_0.set_y_axis(fft_min, fft_max)
self.qtgui_freq_sink_x_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0.enable_grid(True)
self.qtgui_freq_sink_x_0.set_fft_average(0.05)
self.qtgui_freq_sink_x_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_freq_sink_x_0_win, 1, 0, 4, 4)
for r in range(1, 5):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 4):
self.top_grid_layout.setColumnStretch(c, 1)
self._lhcp_signal_label_tool_bar = Qt.QToolBar(self)
if None:
self._lhcp_signal_label_formatter = None
else:
self._lhcp_signal_label_formatter = lambda x: str(x)
self._lhcp_signal_label_tool_bar.addWidget(Qt.QLabel('Signal [dBFS]'+": "))
self._lhcp_signal_label_label = Qt.QLabel(str(self._lhcp_signal_label_formatter(self.lhcp_signal_label)))
self._lhcp_signal_label_tool_bar.addWidget(self._lhcp_signal_label_label)
self.top_grid_layout.addWidget(self._lhcp_signal_label_tool_bar, 9, 5, 1, 1)
for r in range(9, 10):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(5, 6):
self.top_grid_layout.setColumnStretch(c, 1)
def _lhcp_probe_snr_func_probe():
while True:
val = self.lhcp_probe_snr.level()
try:
self.set_lhcp_probe_snr_func(val)
except AttributeError:
pass
time.sleep(1.0 / (10))
_lhcp_probe_snr_func_thread = threading.Thread(target=_lhcp_probe_snr_func_probe)
_lhcp_probe_snr_func_thread.daemon = True
_lhcp_probe_snr_func_thread.start()
def _lhcp_probe_signal_func_probe():
while True:
val = self.lhcp_probe_signal.level()
try:
self.set_lhcp_probe_signal_func(val)
except AttributeError:
pass
time.sleep(1.0 / (10))
_lhcp_probe_signal_func_thread = threading.Thread(target=_lhcp_probe_signal_func_probe)
_lhcp_probe_signal_func_thread.daemon = True
_lhcp_probe_signal_func_thread.start()
def _lhcp_probe_offset_func_probe():
while True:
val = self.lhcp_probe_offset.level()
try:
self.set_lhcp_probe_offset_func(val)
except AttributeError:
pass
time.sleep(1.0 / (10))
_lhcp_probe_offset_func_thread = threading.Thread(target=_lhcp_probe_offset_func_probe)
_lhcp_probe_offset_func_thread.daemon = True
_lhcp_probe_offset_func_thread.start()
def _lhcp_probe_noise_func_probe():
while True:
val = self.lhcp_probe_noise.level()
try:
self.set_lhcp_probe_noise_func(val)
except AttributeError:
pass
time.sleep(1.0 / (10))
_lhcp_probe_noise_func_thread = threading.Thread(target=_lhcp_probe_noise_func_probe)
_lhcp_probe_noise_func_thread.daemon = True
_lhcp_probe_noise_func_thread.start()
self._lhcp_offset_label_tool_bar = Qt.QToolBar(self)
if None:
self._lhcp_offset_label_formatter = None
else:
self._lhcp_offset_label_formatter = lambda x: str(x)
self._lhcp_offset_label_tool_bar.addWidget(Qt.QLabel('Offset [Hz]'+": "))
self._lhcp_offset_label_label = Qt.QLabel(str(self._lhcp_offset_label_formatter(self.lhcp_offset_label)))
self._lhcp_offset_label_tool_bar.addWidget(self._lhcp_offset_label_label)
self.top_grid_layout.addWidget(self._lhcp_offset_label_tool_bar, 9, 4, 1, 1)
for r in range(9, 10):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(4, 5):
self.top_grid_layout.setColumnStretch(c, 1)
self._lhcp_noise_label_tool_bar = Qt.QToolBar(self)
        self._lhcp_noise_label_formatter = lambda x: str(x)
self._lhcp_noise_label_tool_bar.addWidget(Qt.QLabel('Noise [dBFS]'+": "))
self._lhcp_noise_label_label = Qt.QLabel(str(self._lhcp_noise_label_formatter(self.lhcp_noise_label)))
self._lhcp_noise_label_tool_bar.addWidget(self._lhcp_noise_label_label)
self.top_grid_layout.addWidget(self._lhcp_noise_label_tool_bar, 9, 6, 1, 1)
for r in range(9, 10):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(6, 7):
self.top_grid_layout.setColumnStretch(c, 1)
self._lchp_snr_label_tool_bar = Qt.QToolBar(self)
        self._lchp_snr_label_formatter = lambda x: str(x)
self._lchp_snr_label_tool_bar.addWidget(Qt.QLabel('SNR [dB]'+": "))
self._lchp_snr_label_label = Qt.QLabel(str(self._lchp_snr_label_formatter(self.lchp_snr_label)))
self._lchp_snr_label_tool_bar.addWidget(self._lchp_snr_label_label)
self.top_grid_layout.addWidget(self._lchp_snr_label_tool_bar, 9, 7, 1, 1)
for r in range(9, 10):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(7, 8):
self.top_grid_layout.setColumnStretch(c, 1)
self._keep_n_tool_bar = Qt.QToolBar(self)
self._keep_n_tool_bar.addWidget(Qt.QLabel('keep_n'+": "))
self._keep_n_line_edit = Qt.QLineEdit(str(self.keep_n))
self._keep_n_tool_bar.addWidget(self._keep_n_line_edit)
self._keep_n_line_edit.returnPressed.connect(
lambda: self.set_keep_n(eng_notation.str_to_num(str(self._keep_n_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._keep_n_tool_bar, 0, 6, 1, 1)
for r in range(0, 1):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(6, 7):
self.top_grid_layout.setColumnStretch(c, 1)
self.fft_vxx_0_1 = fft.fft_vcc(nfft, True, (window.blackmanharris(nfft)), True, 4)
self.fft_vxx_0_0_0 = fft.fft_vcc(nfft/decim, True, (window.blackmanharris(nfft/decim)), True, 4)
self.fft_vxx_0_0 = fft.fft_vcc(nfft/decim, True, (window.blackmanharris(nfft/decim)), True, 4)
self.fft_vxx_0 = fft.fft_vcc(nfft, True, (window.blackmanharris(nfft)), True, 4)
self.blocks_sub_xx_0_0 = blocks.sub_ff(1)
self.blocks_sub_xx_0 = blocks.sub_ff(1)
self.blocks_stream_to_vector_0_1 = blocks.stream_to_vector(gr.sizeof_gr_complex*1, nfft)
self.blocks_stream_to_vector_0_0_0 = blocks.stream_to_vector(gr.sizeof_gr_complex*1, nfft/decim)
self.blocks_stream_to_vector_0_0 = blocks.stream_to_vector(gr.sizeof_gr_complex*1, nfft/decim)
self.blocks_stream_to_vector_0 = blocks.stream_to_vector(gr.sizeof_gr_complex*1, nfft)
self.blocks_nlog10_ff_0_0_1 = blocks.nlog10_ff(10, nfft, -10*math.log10(nfft))
self.blocks_nlog10_ff_0_0_0_0 = blocks.nlog10_ff(10, nfft/decim, -10*math.log10(nfft/decim))
self.blocks_nlog10_ff_0_0_0 = blocks.nlog10_ff(10, nfft/decim, -10*math.log10(nfft/decim))
self.blocks_nlog10_ff_0_0 = blocks.nlog10_ff(10, nfft, -10*math.log10(nfft))
self.blocks_multiply_xx_0_0 = blocks.multiply_vcc(1)
self.blocks_multiply_xx_0 = blocks.multiply_vcc(1)
self.blocks_multiply_const_vxx_0_0_0 = blocks.multiply_const_vff((samp_rate/(2*math.pi), ))
self.blocks_multiply_const_vxx_0_0 = blocks.multiply_const_vff((samp_rate/(2*math.pi), ))
self.blocks_moving_average_xx_0_2 = blocks.moving_average_ff(int(avg_len), 1.0/(avg_len)/nfft, 4000, nfft)
self.blocks_moving_average_xx_0_1_0 = blocks.moving_average_ff(int(samp_rate/record_hz), 1.0/(samp_rate/record_hz), 4000, 1)
self.blocks_moving_average_xx_0_1 = blocks.moving_average_ff(int(samp_rate/record_hz), 1.0/(samp_rate/record_hz), 4000, 1)
self.blocks_moving_average_xx_0_0_0 = blocks.moving_average_ff(int(avg_len), 1.0/(avg_len)/(nfft/decim*2), 4000, nfft/decim)
self.blocks_moving_average_xx_0_0 = blocks.moving_average_ff(int(avg_len), 1.0/(avg_len)/(nfft/decim*2), 4000, nfft/decim)
self.blocks_moving_average_xx_0 = blocks.moving_average_ff(int(avg_len), 1.0/(avg_len)/nfft, 4000, nfft)
self.blocks_max_xx_0_1 = blocks.max_ff(nfft,1)
self.blocks_max_xx_0_0_0 = blocks.max_ff(nfft/decim,1)
self.blocks_max_xx_0_0 = blocks.max_ff(nfft/decim,1)
self.blocks_max_xx_0 = blocks.max_ff(nfft,1)
self.blocks_keep_one_in_n_0_0_1 = blocks.keep_one_in_n(gr.sizeof_float*1, int(samp_rate/record_hz))
self.blocks_keep_one_in_n_0_0 = blocks.keep_one_in_n(gr.sizeof_float*1, int(samp_rate/record_hz))
self.blocks_complex_to_mag_squared_0_0_1 = blocks.complex_to_mag_squared(nfft)
self.blocks_complex_to_mag_squared_0_0_0_0 = blocks.complex_to_mag_squared(nfft/decim)
self.blocks_complex_to_mag_squared_0_0_0 = blocks.complex_to_mag_squared(nfft/decim)
self.blocks_complex_to_mag_squared_0_0 = blocks.complex_to_mag_squared(nfft)
self.analog_sig_source_x_0_0 = analog.sig_source_c(samp_rate, analog.GR_COS_WAVE, rhcp_probe_offset_func+samp_rate/4, 1, 0)
self.analog_sig_source_x_0 = analog.sig_source_c(samp_rate, analog.GR_COS_WAVE, rhcp_probe_offset_func+samp_rate/4, 1, 0)
self.analog_pll_freqdet_cf_0_0 = analog.pll_freqdet_cf(math.pi/200, math.pi/2.0, -math.pi/2.0)
self.analog_pll_freqdet_cf_0 = analog.pll_freqdet_cf(math.pi/200, math.pi/2.0, -math.pi/2.0)
self._alpha_tool_bar = Qt.QToolBar(self)
self._alpha_tool_bar.addWidget(Qt.QLabel('alpha'+": "))
self._alpha_line_edit = Qt.QLineEdit(str(self.alpha))
self._alpha_tool_bar.addWidget(self._alpha_line_edit)
self._alpha_line_edit.returnPressed.connect(
lambda: self.set_alpha(eng_notation.str_to_num(str(self._alpha_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._alpha_tool_bar, 0, 5, 1, 1)
for r in range(0, 1):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(5, 6):
self.top_grid_layout.setColumnStretch(c, 1)
##################################################
# Connections
##################################################
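        # Connection summary (as wired below): USRP channel 0 feeds the RHCP chains and
        # channel 1 the LHCP chains. Each channel has a PLL frequency-detector branch that is
        # averaged and decimated into an offset probe, a full-band FFT power branch whose peak
        # feeds the signal probe, a mixed/resampled narrowband FFT branch whose peak feeds the
        # noise probe, and a subtractor that forms the SNR probe from the two peaks.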
self.connect((self.analog_pll_freqdet_cf_0, 0), (self.blocks_multiply_const_vxx_0_0, 0))
self.connect((self.analog_pll_freqdet_cf_0_0, 0), (self.blocks_multiply_const_vxx_0_0_0, 0))
self.connect((self.analog_sig_source_x_0, 0), (self.blocks_multiply_xx_0, 1))
self.connect((self.analog_sig_source_x_0_0, 0), (self.blocks_multiply_xx_0_0, 1))
self.connect((self.blocks_complex_to_mag_squared_0_0, 0), (self.blocks_moving_average_xx_0, 0))
self.connect((self.blocks_complex_to_mag_squared_0_0_0, 0), (self.blocks_moving_average_xx_0_0, 0))
self.connect((self.blocks_complex_to_mag_squared_0_0_0_0, 0), (self.blocks_moving_average_xx_0_0_0, 0))
self.connect((self.blocks_complex_to_mag_squared_0_0_1, 0), (self.blocks_moving_average_xx_0_2, 0))
self.connect((self.blocks_keep_one_in_n_0_0, 0), (self.rhcp_probe_offset, 0))
self.connect((self.blocks_keep_one_in_n_0_0_1, 0), (self.lhcp_probe_offset, 0))
self.connect((self.blocks_max_xx_0, 0), (self.blocks_sub_xx_0, 0))
self.connect((self.blocks_max_xx_0, 0), (self.rhcp_probe_signal, 0))
self.connect((self.blocks_max_xx_0_0, 0), (self.blocks_sub_xx_0, 1))
self.connect((self.blocks_max_xx_0_0, 0), (self.rhcp_probe_noise, 0))
self.connect((self.blocks_max_xx_0_0_0, 0), (self.blocks_sub_xx_0_0, 1))
self.connect((self.blocks_max_xx_0_0_0, 0), (self.lhcp_probe_noise, 0))
self.connect((self.blocks_max_xx_0_1, 0), (self.blocks_sub_xx_0_0, 0))
self.connect((self.blocks_max_xx_0_1, 0), (self.lhcp_probe_signal, 0))
self.connect((self.blocks_moving_average_xx_0, 0), (self.blocks_nlog10_ff_0_0, 0))
self.connect((self.blocks_moving_average_xx_0_0, 0), (self.blocks_nlog10_ff_0_0_0, 0))
self.connect((self.blocks_moving_average_xx_0_0_0, 0), (self.blocks_nlog10_ff_0_0_0_0, 0))
self.connect((self.blocks_moving_average_xx_0_1, 0), (self.blocks_keep_one_in_n_0_0, 0))
self.connect((self.blocks_moving_average_xx_0_1_0, 0), (self.blocks_keep_one_in_n_0_0_1, 0))
self.connect((self.blocks_moving_average_xx_0_2, 0), (self.blocks_nlog10_ff_0_0_1, 0))
self.connect((self.blocks_multiply_const_vxx_0_0, 0), (self.blocks_moving_average_xx_0_1, 0))
self.connect((self.blocks_multiply_const_vxx_0_0_0, 0), (self.blocks_moving_average_xx_0_1_0, 0))
self.connect((self.blocks_multiply_xx_0, 0), (self.rational_resampler_xxx_0, 0))
self.connect((self.blocks_multiply_xx_0_0, 0), (self.rational_resampler_xxx_0_0, 0))
self.connect((self.blocks_nlog10_ff_0_0, 0), (self.blocks_max_xx_0, 0))
self.connect((self.blocks_nlog10_ff_0_0_0, 0), (self.blocks_max_xx_0_0, 0))
self.connect((self.blocks_nlog10_ff_0_0_0_0, 0), (self.blocks_max_xx_0_0_0, 0))
self.connect((self.blocks_nlog10_ff_0_0_1, 0), (self.blocks_max_xx_0_1, 0))
self.connect((self.blocks_stream_to_vector_0, 0), (self.fft_vxx_0, 0))
self.connect((self.blocks_stream_to_vector_0_0, 0), (self.fft_vxx_0_0, 0))
self.connect((self.blocks_stream_to_vector_0_0_0, 0), (self.fft_vxx_0_0_0, 0))
self.connect((self.blocks_stream_to_vector_0_1, 0), (self.fft_vxx_0_1, 0))
self.connect((self.blocks_sub_xx_0, 0), (self.rhcp_probe_snr, 0))
self.connect((self.blocks_sub_xx_0_0, 0), (self.lhcp_probe_snr, 0))
self.connect((self.fft_vxx_0, 0), (self.blocks_complex_to_mag_squared_0_0, 0))
self.connect((self.fft_vxx_0_0, 0), (self.blocks_complex_to_mag_squared_0_0_0, 0))
self.connect((self.fft_vxx_0_0_0, 0), (self.blocks_complex_to_mag_squared_0_0_0_0, 0))
self.connect((self.fft_vxx_0_1, 0), (self.blocks_complex_to_mag_squared_0_0_1, 0))
self.connect((self.rational_resampler_xxx_0, 0), (self.blocks_stream_to_vector_0_0, 0))
self.connect((self.rational_resampler_xxx_0_0, 0), (self.blocks_stream_to_vector_0_0_0, 0))
self.connect((self.uhd_usrp_source_1, 0), (self.analog_pll_freqdet_cf_0, 0))
self.connect((self.uhd_usrp_source_1, 1), (self.analog_pll_freqdet_cf_0_0, 0))
self.connect((self.uhd_usrp_source_1, 0), (self.blocks_multiply_xx_0, 0))
self.connect((self.uhd_usrp_source_1, 1), (self.blocks_multiply_xx_0_0, 0))
self.connect((self.uhd_usrp_source_1, 0), (self.blocks_stream_to_vector_0, 0))
self.connect((self.uhd_usrp_source_1, 1), (self.blocks_stream_to_vector_0_1, 0))
self.connect((self.uhd_usrp_source_1, 0), (self.qtgui_freq_sink_x_0, 0))
self.connect((self.uhd_usrp_source_1, 1), (self.qtgui_freq_sink_x_0_0, 0))
self.connect((self.uhd_usrp_source_1, 0), (self.qtgui_waterfall_sink_x_0, 0))
self.connect((self.uhd_usrp_source_1, 1), (self.qtgui_waterfall_sink_x_0_0, 0))
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "lro_track_2")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
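    # GRC-generated variable accessors: each setter below propagates its new value to every
    # block and Qt widget that depends on that variable.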
def get_avg_len(self):
return self.avg_len
def set_avg_len(self, avg_len):
self.avg_len = avg_len
self.blocks_moving_average_xx_0_2.set_length_and_scale(int(self.avg_len), 1.0/(self.avg_len)/self.nfft)
self.blocks_moving_average_xx_0_0_0.set_length_and_scale(int(self.avg_len), 1.0/(self.avg_len)/(self.nfft/self.decim*2))
self.blocks_moving_average_xx_0_0.set_length_and_scale(int(self.avg_len), 1.0/(self.avg_len)/(self.nfft/self.decim*2))
self.blocks_moving_average_xx_0.set_length_and_scale(int(self.avg_len), 1.0/(self.avg_len)/self.nfft)
def get_nfft(self):
return self.nfft
def set_nfft(self, nfft):
self.nfft = nfft
self.blocks_moving_average_xx_0_2.set_length_and_scale(int(self.avg_len), 1.0/(self.avg_len)/self.nfft)
self.blocks_moving_average_xx_0_0_0.set_length_and_scale(int(self.avg_len), 1.0/(self.avg_len)/(self.nfft/self.decim*2))
self.blocks_moving_average_xx_0_0.set_length_and_scale(int(self.avg_len), 1.0/(self.avg_len)/(self.nfft/self.decim*2))
self.blocks_moving_average_xx_0.set_length_and_scale(int(self.avg_len), 1.0/(self.avg_len)/self.nfft)
def get_path(self):
return self.path
def set_path(self, path):
self.path = path
self.set_fp_rhcp("{:s}/{:s}".format(self.path, self.fn_rhcp))
self.set_fp_lhcp("{:s}/{:s}".format(self.path, self.fn_lhcp))
def get_record_hz(self):
return self.record_hz
def set_record_hz(self, record_hz):
self.record_hz = record_hz
self.set_keep_n(self.samp_rate/self.record_hz)
self.blocks_moving_average_xx_0_1_0.set_length_and_scale(int(self.samp_rate/self.record_hz), 1.0/(self.samp_rate/self.record_hz))
self.blocks_moving_average_xx_0_1.set_length_and_scale(int(self.samp_rate/self.record_hz), 1.0/(self.samp_rate/self.record_hz))
self.blocks_keep_one_in_n_0_0_1.set_n(int(self.samp_rate/self.record_hz))
self.blocks_keep_one_in_n_0_0.set_n(int(self.samp_rate/self.record_hz))
self.set_alpha(1.0/(self.samp_rate/self.record_hz))
def get_rx_alt(self):
return self.rx_alt
def set_rx_alt(self, rx_alt):
self.rx_alt = rx_alt
def get_rx_lat(self):
return self.rx_lat
def set_rx_lat(self, rx_lat):
self.rx_lat = rx_lat
def get_rx_lon(self):
return self.rx_lon
def set_rx_lon(self, rx_lon):
self.rx_lon = rx_lon
def get_signal_type(self):
return self.signal_type
def set_signal_type(self, signal_type):
self.signal_type = signal_type
def get_usrp_type(self):
return self.usrp_type
def set_usrp_type(self, usrp_type):
self.usrp_type = usrp_type
def get_ts_str(self):
return self.ts_str
def set_ts_str(self, ts_str):
self.ts_str = ts_str
        self.set_fn_rhcp("{:s}_RHCP_{:s}".format(self.signal_type.upper(), self.ts_str))
        self.set_fn_lhcp("{:s}_LHCP_{:s}".format(self.signal_type.upper(), self.ts_str))
def get_rhcp_probe_snr_func(self):
return self.rhcp_probe_snr_func
def set_rhcp_probe_snr_func(self, rhcp_probe_snr_func):
self.rhcp_probe_snr_func = rhcp_probe_snr_func
self.set_rhcp_snr_var("{:3.3f}".format(self.rhcp_probe_snr_func))
def get_rhcp_probe_signal_func(self):
return self.rhcp_probe_signal_func
def set_rhcp_probe_signal_func(self, rhcp_probe_signal_func):
self.rhcp_probe_signal_func = rhcp_probe_signal_func
self.set_rhcp_signal_var("{:3.3f}".format(self.rhcp_probe_signal_func))
def get_rhcp_probe_offset_func(self):
return self.rhcp_probe_offset_func
def set_rhcp_probe_offset_func(self, rhcp_probe_offset_func):
self.rhcp_probe_offset_func = rhcp_probe_offset_func
self.set_rhcp_offset_var("{:3.1f}".format(self.rhcp_probe_offset_func))
self.analog_sig_source_x_0_0.set_frequency(self.rhcp_probe_offset_func+self.samp_rate/4)
self.analog_sig_source_x_0.set_frequency(self.rhcp_probe_offset_func+self.samp_rate/4)
def get_rhcp_probe_noise_func(self):
return self.rhcp_probe_noise_func
def set_rhcp_probe_noise_func(self, rhcp_probe_noise_func):
self.rhcp_probe_noise_func = rhcp_probe_noise_func
self.set_rhcp_noise_var("{:3.3f}".format(self.rhcp_probe_noise_func))
def get_lhcp_probe_snr_func(self):
return self.lhcp_probe_snr_func
def set_lhcp_probe_snr_func(self, lhcp_probe_snr_func):
self.lhcp_probe_snr_func = lhcp_probe_snr_func
self.set_lhcp_snr_var("{:3.3f}".format(self.lhcp_probe_snr_func))
def get_lhcp_probe_signal_func(self):
return self.lhcp_probe_signal_func
def set_lhcp_probe_signal_func(self, lhcp_probe_signal_func):
self.lhcp_probe_signal_func = lhcp_probe_signal_func
self.set_lhcp_signal_var("{:3.3f}".format(self.lhcp_probe_signal_func))
def get_lhcp_probe_offset_func(self):
return self.lhcp_probe_offset_func
def set_lhcp_probe_offset_func(self, lhcp_probe_offset_func):
self.lhcp_probe_offset_func = lhcp_probe_offset_func
self.set_lhcp_offset_var("{:3.1f}".format(self.lhcp_probe_offset_func))
def get_lhcp_probe_noise_func(self):
return self.lhcp_probe_noise_func
def set_lhcp_probe_noise_func(self, lhcp_probe_noise_func):
self.lhcp_probe_noise_func = lhcp_probe_noise_func
self.set_lhcp_noise_var("{:3.3f}".format(self.lhcp_probe_noise_func))
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
Qt.QMetaObject.invokeMethod(self._samp_rate_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.samp_rate)))
self.uhd_usrp_source_1.set_samp_rate(self.samp_rate)
self.uhd_usrp_source_1.set_center_freq(uhd.tune_request(self.rx_freq, self.samp_rate/2), 0)
self.uhd_usrp_source_1.set_center_freq(uhd.tune_request(self.rx_freq, self.samp_rate/2), 1)
self.qtgui_waterfall_sink_x_0_0.set_frequency_range(0, self.samp_rate)
self.qtgui_waterfall_sink_x_0.set_frequency_range(0, self.samp_rate)
self.qtgui_freq_sink_x_0_0.set_frequency_range(0, self.samp_rate)
self.qtgui_freq_sink_x_0.set_frequency_range(0, self.samp_rate)
self.set_keep_n(self.samp_rate/self.record_hz)
self.blocks_multiply_const_vxx_0_0_0.set_k((self.samp_rate/(2*math.pi), ))
self.blocks_multiply_const_vxx_0_0.set_k((self.samp_rate/(2*math.pi), ))
self.blocks_moving_average_xx_0_1_0.set_length_and_scale(int(self.samp_rate/self.record_hz), 1.0/(self.samp_rate/self.record_hz))
self.blocks_moving_average_xx_0_1.set_length_and_scale(int(self.samp_rate/self.record_hz), 1.0/(self.samp_rate/self.record_hz))
self.blocks_keep_one_in_n_0_0_1.set_n(int(self.samp_rate/self.record_hz))
self.blocks_keep_one_in_n_0_0.set_n(int(self.samp_rate/self.record_hz))
self.analog_sig_source_x_0_0.set_sampling_freq(self.samp_rate)
self.analog_sig_source_x_0_0.set_frequency(self.rhcp_probe_offset_func+self.samp_rate/4)
self.analog_sig_source_x_0.set_sampling_freq(self.samp_rate)
self.analog_sig_source_x_0.set_frequency(self.rhcp_probe_offset_func+self.samp_rate/4)
self.set_alpha(1.0/(self.samp_rate/self.record_hz))
def get_rhcp_snr_var(self):
return self.rhcp_snr_var
def set_rhcp_snr_var(self, rhcp_snr_var):
self.rhcp_snr_var = rhcp_snr_var
self.set_rhcp_snr_label(self._rhcp_snr_label_formatter(self.rhcp_snr_var))
def get_rhcp_signal_var(self):
return self.rhcp_signal_var
def set_rhcp_signal_var(self, rhcp_signal_var):
self.rhcp_signal_var = rhcp_signal_var
self.set_rhcp_signal_label(self._rhcp_signal_label_formatter(self.rhcp_signal_var))
def get_rhcp_offset_var(self):
return self.rhcp_offset_var
def set_rhcp_offset_var(self, rhcp_offset_var):
self.rhcp_offset_var = rhcp_offset_var
self.set_rhcp_offset_label(self._rhcp_offset_label_formatter(self.rhcp_offset_var))
def get_rhcp_noise_var(self):
return self.rhcp_noise_var
def set_rhcp_noise_var(self, rhcp_noise_var):
self.rhcp_noise_var = rhcp_noise_var
self.set_rhcp_noise_label(self._rhcp_noise_label_formatter(self.rhcp_noise_var))
def get_lhcp_snr_var(self):
return self.lhcp_snr_var
def set_lhcp_snr_var(self, lhcp_snr_var):
self.lhcp_snr_var = lhcp_snr_var
self.set_lchp_snr_label(self._lchp_snr_label_formatter(self.lhcp_snr_var))
def get_lhcp_signal_var(self):
return self.lhcp_signal_var
def set_lhcp_signal_var(self, lhcp_signal_var):
self.lhcp_signal_var = lhcp_signal_var
self.set_lhcp_signal_label(self._lhcp_signal_label_formatter(self.lhcp_signal_var))
def get_lhcp_offset_var(self):
return self.lhcp_offset_var
def set_lhcp_offset_var(self, lhcp_offset_var):
self.lhcp_offset_var = lhcp_offset_var
self.set_lhcp_offset_label(self._lhcp_offset_label_formatter(self.lhcp_offset_var))
def get_lhcp_noise_var(self):
return self.lhcp_noise_var
def set_lhcp_noise_var(self, lhcp_noise_var):
self.lhcp_noise_var = lhcp_noise_var
self.set_lhcp_noise_label(self._lhcp_noise_label_formatter(self.lhcp_noise_var))
def get_fn_rhcp(self):
return self.fn_rhcp
def set_fn_rhcp(self, fn_rhcp):
self.fn_rhcp = fn_rhcp
self.set_fp_rhcp("{:s}/{:s}".format(self.path, self.fn_rhcp))
def get_fn_lhcp(self):
return self.fn_lhcp
def set_fn_lhcp(self, fn_lhcp):
self.fn_lhcp = fn_lhcp
self.set_fp_lhcp("{:s}/{:s}".format(self.path, self.fn_lhcp))
def get_rx_gain(self):
return self.rx_gain
def set_rx_gain(self, rx_gain):
self.rx_gain = rx_gain
Qt.QMetaObject.invokeMethod(self._rx_gain_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.rx_gain)))
self.uhd_usrp_source_1.set_gain(self.rx_gain, 0)
self.uhd_usrp_source_1.set_gain(self.rx_gain, 1)
def get_rx_freq(self):
return self.rx_freq
def set_rx_freq(self, rx_freq):
self.rx_freq = rx_freq
Qt.QMetaObject.invokeMethod(self._rx_freq_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.rx_freq)))
self.uhd_usrp_source_1.set_center_freq(uhd.tune_request(self.rx_freq, self.samp_rate/2), 0)
self.uhd_usrp_source_1.set_center_freq(uhd.tune_request(self.rx_freq, self.samp_rate/2), 1)
def get_rhcp_snr_label(self):
return self.rhcp_snr_label
def set_rhcp_snr_label(self, rhcp_snr_label):
self.rhcp_snr_label = rhcp_snr_label
Qt.QMetaObject.invokeMethod(self._rhcp_snr_label_label, "setText", Qt.Q_ARG("QString", self.rhcp_snr_label))
def get_rhcp_signal_label(self):
return self.rhcp_signal_label
def set_rhcp_signal_label(self, rhcp_signal_label):
self.rhcp_signal_label = rhcp_signal_label
Qt.QMetaObject.invokeMethod(self._rhcp_signal_label_label, "setText", Qt.Q_ARG("QString", self.rhcp_signal_label))
def get_rhcp_offset_label(self):
return self.rhcp_offset_label
def set_rhcp_offset_label(self, rhcp_offset_label):
self.rhcp_offset_label = rhcp_offset_label
Qt.QMetaObject.invokeMethod(self._rhcp_offset_label_label, "setText", Qt.Q_ARG("QString", self.rhcp_offset_label))
def get_rhcp_noise_label(self):
return self.rhcp_noise_label
def set_rhcp_noise_label(self, rhcp_noise_label):
self.rhcp_noise_label = rhcp_noise_label
Qt.QMetaObject.invokeMethod(self._rhcp_noise_label_label, "setText", Qt.Q_ARG("QString", self.rhcp_noise_label))
def get_lhcp_signal_label(self):
return self.lhcp_signal_label
def set_lhcp_signal_label(self, lhcp_signal_label):
self.lhcp_signal_label = lhcp_signal_label
Qt.QMetaObject.invokeMethod(self._lhcp_signal_label_label, "setText", Qt.Q_ARG("QString", self.lhcp_signal_label))
def get_lhcp_offset_label(self):
return self.lhcp_offset_label
def set_lhcp_offset_label(self, lhcp_offset_label):
self.lhcp_offset_label = lhcp_offset_label
Qt.QMetaObject.invokeMethod(self._lhcp_offset_label_label, "setText", Qt.Q_ARG("QString", self.lhcp_offset_label))
def get_lhcp_noise_label(self):
return self.lhcp_noise_label
def set_lhcp_noise_label(self, lhcp_noise_label):
self.lhcp_noise_label = lhcp_noise_label
Qt.QMetaObject.invokeMethod(self._lhcp_noise_label_label, "setText", Qt.Q_ARG("QString", self.lhcp_noise_label))
def get_lchp_snr_label(self):
return self.lchp_snr_label
def set_lchp_snr_label(self, lchp_snr_label):
self.lchp_snr_label = lchp_snr_label
Qt.QMetaObject.invokeMethod(self._lchp_snr_label_label, "setText", Qt.Q_ARG("QString", self.lchp_snr_label))
def get_keep_n(self):
return self.keep_n
def set_keep_n(self, keep_n):
self.keep_n = keep_n
Qt.QMetaObject.invokeMethod(self._keep_n_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.keep_n)))
def get_fp_rhcp(self):
return self.fp_rhcp
def set_fp_rhcp(self, fp_rhcp):
self.fp_rhcp = fp_rhcp
def get_fp_lhcp(self):
return self.fp_lhcp
def set_fp_lhcp(self, fp_lhcp):
self.fp_lhcp = fp_lhcp
def get_fft_min(self):
return self.fft_min
def set_fft_min(self, fft_min):
self.fft_min = fft_min
Qt.QMetaObject.invokeMethod(self._fft_min_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.fft_min)))
self.qtgui_waterfall_sink_x_0_0.set_intensity_range(self.fft_min, self.fft_max)
self.qtgui_waterfall_sink_x_0.set_intensity_range(self.fft_min, self.fft_max)
self.qtgui_freq_sink_x_0_0.set_y_axis(self.fft_min, self.fft_max)
self.qtgui_freq_sink_x_0.set_y_axis(self.fft_min, self.fft_max)
def get_fft_max(self):
return self.fft_max
def set_fft_max(self, fft_max):
self.fft_max = fft_max
Qt.QMetaObject.invokeMethod(self._fft_max_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.fft_max)))
self.qtgui_waterfall_sink_x_0_0.set_intensity_range(self.fft_min, self.fft_max)
self.qtgui_waterfall_sink_x_0.set_intensity_range(self.fft_min, self.fft_max)
self.qtgui_freq_sink_x_0_0.set_y_axis(self.fft_min, self.fft_max)
self.qtgui_freq_sink_x_0.set_y_axis(self.fft_min, self.fft_max)
def get_decim_0(self):
return self.decim_0
def set_decim_0(self, decim_0):
self.decim_0 = decim_0
def get_decim(self):
return self.decim
def set_decim(self, decim):
self.decim = decim
self.blocks_moving_average_xx_0_0_0.set_length_and_scale(int(self.avg_len), 1.0/(self.avg_len)/(self.nfft/self.decim*2))
self.blocks_moving_average_xx_0_0.set_length_and_scale(int(self.avg_len), 1.0/(self.avg_len)/(self.nfft/self.decim*2))
def get_alpha(self):
return self.alpha
def set_alpha(self, alpha):
self.alpha = alpha
Qt.QMetaObject.invokeMethod(self._alpha_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.alpha)))
def argument_parser():
parser = OptionParser(usage="%prog: [options]", option_class=eng_option)
parser.add_option(
"", "--avg-len", dest="avg_len", type="eng_float", default=eng_notation.num_to_str(256),
help="Set avg_len [default=%default]")
parser.add_option(
"", "--nfft", dest="nfft", type="intx", default=2048,
help="Set nfft [default=%default]")
parser.add_option(
"", "--path", dest="path", type="string", default="/data2/captures/20200329",
help="Set path [default=%default]")
parser.add_option(
"", "--record-hz", dest="record_hz", type="intx", default=10,
help="Set record_hz [default=%default]")
parser.add_option(
"", "--rx-alt", dest="rx_alt", type="eng_float", default=eng_notation.num_to_str(542),
help="Set rx_alt [default=%default]")
parser.add_option(
"", "--rx-lat", dest="rx_lat", type="eng_float", default=eng_notation.num_to_str(37.148745),
help="Set rx_lat [default=%default]")
parser.add_option(
"", "--rx-lon", dest="rx_lon", type="eng_float", default=eng_notation.num_to_str(-80.578557),
help="Set rx_lon [default=%default]")
parser.add_option(
"", "--signal-type", dest="signal_type", type="string", default='LRO',
help="Set signal_type [default=%default]")
parser.add_option(
"", "--usrp-type", dest="usrp_type", type="string", default='B210',
help="Set usrp_type [default=%default]")
return parser
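# Example invocation (GNU Radio 3.7 / Python 2 flowgraph; the path below is a placeholder):
#   python lro_track_2.py --path /tmp/captures --record-hz 10 --nfft 2048 --avg-len 256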
def main(top_block_cls=lro_track_2, options=None):
if options is None:
options, _ = argument_parser().parse_args()
from distutils.version import StrictVersion
if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
style = gr.prefs().get_string('qtgui', 'style', 'raster')
Qt.QApplication.setGraphicsSystem(style)
qapp = Qt.QApplication(sys.argv)
tb = top_block_cls(avg_len=options.avg_len, nfft=options.nfft, path=options.path, record_hz=options.record_hz, rx_alt=options.rx_alt, rx_lat=options.rx_lat, rx_lon=options.rx_lon, signal_type=options.signal_type, usrp_type=options.usrp_type)
tb.start()
tb.show()
def quitting():
tb.stop()
tb.wait()
qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
qapp.exec_()
if __name__ == '__main__':
main()
|
installwizard.py
|
# Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea)
from electrum_spectrumcash.wallet import Wallet, Abstract_Wallet
from electrum_spectrumcash.storage import WalletStorage
from electrum_spectrumcash.util import UserCancelled, InvalidPassword, WalletFileException
from electrum_spectrumcash.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from electrum_spectrumcash.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
from electrum_spectrumcash.plugin import run_hook
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:KxZcY47uGp9a... \t-> 1DckmggQM...\n' +
'p2wpkh-p2sh:KxZcY47uGp9a... \t-> 3NhNeZQXF...\n' +
'p2wpkh:KxZcY47uGp9a... \t-> bc1q3fjfk...')
# note: full key is KxZcY47uGp9aVQAb6VVvuBs8SwHKgkSR2DbZUzjDzXf2N2GPhG9n
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
+ _("You have multiple consecutive whitespaces or leading/trailing "
"whitespaces in your passphrase.") + " " \
+ _("This is discouraged.") + " " \
+ _("Due to a bug, old versions of Electrum will NOT be creating the "
"same wallet as newer versions or other software.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
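# Decorator for wizard pages: updates the Back/Cancel button, runs the wrapped dialog,
# passes its result(s) to run_next, and maps GoBack to either go_back() or closing the wizard.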
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0] # type: InstallWizard
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
if type(out) is not tuple:
out = (out,)
run_next(*out)
except GoBack:
if wizard.can_go_back():
wizard.go_back()
return
else:
wizard.close()
raise
return func_wrapper
class WalletAlreadyOpenInMemory(Exception):
def __init__(self, wallet: Abstract_Wallet):
super().__init__()
self.wallet = wallet
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
def __init__(self, config, app, plugins):
QDialog.__init__(self, None)
BaseWizard.__init__(self, config, plugins)
self.setWindowTitle('SpectrumCash-Electrum - ' + _('Install Wizard'))
self.app = app
self.config = config
        # Set for the base class
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon('electrum-spectrumcash.png')
self.show()
self.raise_()
        self.refresh_gui()  # Needed for Qt on macOS. Lame.
def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('SpectrumCash-Electrum wallet'))
self.temp_storage = WalletStorage(path, manual_upgrades=True)
wallet_folder = os.path.dirname(self.temp_storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
self.temp_storage = wallet_from_memory.storage
else:
self.temp_storage = WalletStorage(path, manual_upgrades=True)
self.next_button.setEnabled(True)
except BaseException:
self.logger.exception('')
self.temp_storage = None
self.next_button.setEnabled(False)
user_needs_to_enter_password = False
if self.temp_storage:
if not self.temp_storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
elif not wallet_from_memory:
if self.temp_storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
user_needs_to_enter_password = True
elif self.temp_storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
else:
msg = _("Press 'Next' to open this wallet.")
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
else:
msg = _('Cannot read file')
self.msg_label.setText(msg)
if user_needs_to_enter_password:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.temp_storage.path)
self.name_e.setText(n)
while True:
if self.loop.exec_() != 2: # 2 = next
raise UserCancelled
if self.temp_storage.file_exists() and not self.temp_storage.is_encrypted():
break
if not self.temp_storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(self.temp_storage.path)
if wallet_from_memory:
raise WalletAlreadyOpenInMemory(wallet_from_memory)
if self.temp_storage.file_exists() and self.temp_storage.is_encrypted():
if self.temp_storage.is_encrypted_with_user_pw():
password = self.pw_e.text()
try:
self.temp_storage.decrypt(password)
break
except InvalidPassword as e:
self.show_message(title=_('Error'), msg=str(e))
continue
except BaseException as e:
self.logger.exception('')
self.show_message(title=_('Error'), msg=str(e))
raise UserCancelled()
elif self.temp_storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=self.temp_storage)
except InvalidPassword as e:
self.show_message(title=_('Error'),
msg=_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.reset_stack()
return self.select_storage(path, get_wallet_from_daemon)
except BaseException as e:
self.logger.exception('')
self.show_message(title=_('Error'), msg=str(e))
raise UserCancelled()
if self.temp_storage.is_past_initial_decryption():
break
else:
raise UserCancelled()
else:
raise Exception('Unexpected encryption version')
        return self.temp_storage.path, (self.temp_storage if self.temp_storage.file_exists() else None)
def run_upgrades(self, storage):
path = storage.path
if storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
# raise now, to avoid having the old storage opened
raise UserCancelled()
action = storage.get_action()
if action and storage.requires_upgrade():
raise WalletFileException('Incomplete wallet files cannot be upgraded.')
if action:
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
self.data = storage.db.data # FIXME
self.run(action)
for k, v in self.data.items():
storage.put(k, v)
storage.write()
return
if storage.requires_upgrade():
self.upgrade_storage(storage)
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
self.logger.error("on_error", exc_info=exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(icon_path(filename))
.scaledToWidth(60, mode=Qt.SmoothTransformation))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack from None
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
header_layout = QHBoxLayout()
label = WWLabel(message)
label.setMinimumWidth(400)
header_layout.addWidget(label)
if show_wif_help:
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(msg=msg, kind=kind, OK_button=self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self, **kwargs):
self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
label = WWLabel(msg)
vbox = QVBoxLayout()
vbox.addSpacing(100)
label.setMinimumWidth(300)
label.setAlignment(Qt.AlignCenter)
vbox.addWidget(label)
self.set_layout(vbox, next_enabled=False)
self.back_button.setEnabled(False)
t = threading.Thread(target=task)
t.start()
while True:
t.join(1.0/60)
if t.is_alive():
self.refresh_gui()
else:
break
if on_finished:
on_finished()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def choice_and_line_dialog(self, title: str, message1: str, choices: List[Tuple[str, str, str]],
message2: str, test_text: Callable[[str], int],
run_next, default_choice_idx: int=0) -> Tuple[str, str]:
vbox = QVBoxLayout()
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
c_default_text = [x[2] for x in choices]
def on_choice_click(clayout):
idx = clayout.selected_index()
line.setText(c_default_text[idx])
clayout = ChoicesLayout(message1, c_titles, on_choice_click,
checked_index=default_choice_idx)
vbox.addLayout(clayout.layout())
vbox.addSpacing(50)
vbox.addWidget(WWLabel(message2))
line = QLineEdit()
def on_text_change(text):
self.next_button.setEnabled(test_text(text))
line.textEdited.connect(on_text_change)
on_choice_click(clayout) # set default text for "line"
vbox.addWidget(line)
self.exec_layout(vbox, title)
choice = c_values[clayout.selected_index()]
return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=(), warn_issue4566=False):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
if warn_issue4566:
text_whitespace_normalised = ' '.join(text.split())
warn_issue4566_label.setVisible(text != text_whitespace_normalised)
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
warn_issue4566_label.setVisible(False)
vbox.addWidget(warn_issue4566_label)
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMinimumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, alignment=Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
main.py
|
import threading
from queue import Queue
from spider import Spider
from domain import *
from general import *
# This is the main part of the program; it adds multithreading so the web scraper can
# crawl pages more quickly and efficiently.
PROJECT_NAME='tripadvisor'
HOMEPAGE= 'https://www.tripadvisor.ca/Hotel_Review-g60763-d2079052-Reviews-YOTEL_New_York-New_York_City_New_York.html'
DOMAIN_NAME = get_domain(HOMEPAGE)
QUEUE_FILE = PROJECT_NAME+'/queue.txt'
CRAWLED_FILE = PROJECT_NAME+'/crawled.txt'
NUMBER_OF_THREADS = 8
queue = Queue()
Spider(PROJECT_NAME,HOMEPAGE,DOMAIN_NAME)
# Worker threads (daemon: will end when main thread ends)
def workers():
for _ in range(NUMBER_OF_THREADS):
t = threading.Thread(target=to_do)
t.daemon = True
t.start()
# Worker loop: take the next URL from the queue, crawl it, and mark the task done
def to_do():
while True:
url = queue.get()
Spider.crawl_page(threading.current_thread().name, url)
queue.task_done()
# Put queued links into the work queue and wait until they have all been processed
def jobs():
for link in file_to_set(QUEUE_FILE):
queue.put(link)
queue.join()
crawl()
# This will crawl items in the queue if they exist
def crawl():
queued_links = file_to_set(QUEUE_FILE)
if len(queued_links) > 0:
print(str(len(queued_links)) + ' links in the queue')
jobs()
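# Start the daemon worker threads, then seed the queue from the queue file; jobs() blocks
# on queue.join() until every queued URL has been crawled, and crawl() re-checks for new links.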
workers()
crawl()
|
test.py
|
import gzip
import json
import logging
import os
import io
import random
import threading
import time
import helpers.client
import pytest
from helpers.cluster import ClickHouseCluster, ClickHouseInstance, get_instances_dir
from helpers.network import PartitionManager
from helpers.test_tools import exec_query_with_retry
MINIO_INTERNAL_PORT = 9001
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_PATH = os.path.join(
SCRIPT_DIR, "./{}/dummy/configs/config.d/defaultS3.xml".format(get_instances_dir())
)
# Creates S3 bucket for tests and allows anonymous read-write access to it.
def prepare_s3_bucket(started_cluster):
# Allows read-write access for bucket without authorization.
bucket_read_write_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetBucketLocation",
"Resource": "arn:aws:s3:::root",
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::root",
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::root/*",
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::root/*",
},
],
}
minio_client = started_cluster.minio_client
minio_client.set_bucket_policy(
started_cluster.minio_bucket, json.dumps(bucket_read_write_policy)
)
started_cluster.minio_restricted_bucket = "{}-with-auth".format(
started_cluster.minio_bucket
)
if minio_client.bucket_exists(started_cluster.minio_restricted_bucket):
minio_client.remove_bucket(started_cluster.minio_restricted_bucket)
minio_client.make_bucket(started_cluster.minio_restricted_bucket)
def put_s3_file_content(started_cluster, bucket, filename, data):
buf = io.BytesIO(data)
started_cluster.minio_client.put_object(bucket, filename, buf, len(data))
# Returns content of given S3 file as string.
def get_s3_file_content(started_cluster, bucket, filename, decode=True):
# type: (ClickHouseCluster, str, str, bool) -> str
data = started_cluster.minio_client.get_object(bucket, filename)
data_str = b""
for chunk in data.stream():
data_str += chunk
if decode:
return data_str.decode()
return data_str
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance(
"restricted_dummy",
main_configs=["configs/config_for_test_remote_host_filter.xml"],
with_minio=True,
)
cluster.add_instance(
"dummy",
with_minio=True,
main_configs=["configs/defaultS3.xml", "configs/named_collections.xml"],
)
cluster.add_instance(
"s3_max_redirects",
with_minio=True,
main_configs=["configs/defaultS3.xml"],
user_configs=["configs/s3_max_redirects.xml"],
)
logging.info("Starting cluster...")
cluster.start()
logging.info("Cluster started")
prepare_s3_bucket(cluster)
logging.info("S3 bucket created")
run_s3_mocks(cluster)
yield cluster
finally:
cluster.shutdown()
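# Module-scoped fixture: one shared ClickHouse + MinIO cluster is started for all tests in
# this file and shut down when the module finishes.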
def run_query(instance, query, stdin=None, settings=None):
# type: (ClickHouseInstance, str, object, dict) -> str
logging.info("Running query '{}'...".format(query))
result = instance.query(query, stdin=stdin, settings=settings)
logging.info("Query finished")
return result
# Test simple put. Also checks that wrong credentials produce an error with every compression method.
@pytest.mark.parametrize(
"maybe_auth,positive,compression",
[
pytest.param("", True, "auto", id="positive"),
pytest.param("'minio','minio123',", True, "auto", id="auth_positive"),
pytest.param("'wrongid','wrongkey',", False, "auto", id="auto"),
pytest.param("'wrongid','wrongkey',", False, "gzip", id="gzip"),
pytest.param("'wrongid','wrongkey',", False, "deflate", id="deflate"),
pytest.param("'wrongid','wrongkey',", False, "brotli", id="brotli"),
pytest.param("'wrongid','wrongkey',", False, "xz", id="xz"),
pytest.param("'wrongid','wrongkey',", False, "zstd", id="zstd"),
],
)
def test_put(started_cluster, maybe_auth, positive, compression):
# type: (ClickHouseCluster) -> None
bucket = (
started_cluster.minio_bucket
if not maybe_auth
else started_cluster.minio_restricted_bucket
)
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)"
values_csv = "1,2,3\n3,2,1\n78,43,45\n"
filename = "test.csv"
put_query = f"""insert into table function s3('http://{started_cluster.minio_ip}:{started_cluster.minio_port}/{bucket}/{filename}',
{maybe_auth}'CSV', '{table_format}', '{compression}') values settings s3_truncate_on_insert=1 {values}"""
try:
run_query(instance, put_query)
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
assert values_csv == get_s3_file_content(started_cluster, bucket, filename)
def test_partition_by(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
partition_by = "column3"
values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)"
filename = "test_{_partition_id}.csv"
put_query = f"""INSERT INTO TABLE FUNCTION
s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV', '{table_format}')
PARTITION BY {partition_by} VALUES {values}"""
run_query(instance, put_query)
assert "1,2,3\n" == get_s3_file_content(started_cluster, bucket, "test_3.csv")
assert "3,2,1\n" == get_s3_file_content(started_cluster, bucket, "test_1.csv")
assert "78,43,45\n" == get_s3_file_content(started_cluster, bucket, "test_45.csv")
filename = "test2_{_partition_id}.csv"
instance.query(
f"create table p ({table_format}) engine=S3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV') partition by column3"
)
instance.query(f"insert into p values {values}")
assert "1,2,3\n" == get_s3_file_content(started_cluster, bucket, "test2_3.csv")
assert "3,2,1\n" == get_s3_file_content(started_cluster, bucket, "test2_1.csv")
assert "78,43,45\n" == get_s3_file_content(started_cluster, bucket, "test2_45.csv")
def test_partition_by_string_column(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "col_num UInt32, col_str String"
partition_by = "col_str"
values = "(1, 'foo/bar'), (3, 'йцук'), (78, '你好')"
filename = "test_{_partition_id}.csv"
put_query = f"""INSERT INTO TABLE FUNCTION
s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV', '{table_format}')
PARTITION BY {partition_by} VALUES {values}"""
run_query(instance, put_query)
assert '1,"foo/bar"\n' == get_s3_file_content(
started_cluster, bucket, "test_foo/bar.csv"
)
assert '3,"йцук"\n' == get_s3_file_content(started_cluster, bucket, "test_йцук.csv")
assert '78,"你好"\n' == get_s3_file_content(started_cluster, bucket, "test_你好.csv")
def test_partition_by_const_column(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)"
partition_by = "'88'"
values_csv = "1,2,3\n3,2,1\n78,43,45\n"
filename = "test_{_partition_id}.csv"
put_query = f"""INSERT INTO TABLE FUNCTION
s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV', '{table_format}')
PARTITION BY {partition_by} VALUES {values}"""
run_query(instance, put_query)
assert values_csv == get_s3_file_content(started_cluster, bucket, "test_88.csv")
@pytest.mark.parametrize("special", ["space", "plus"])
def test_get_file_with_special(started_cluster, special):
symbol = {"space": " ", "plus": "+"}[special]
urlsafe_symbol = {"space": "%20", "plus": "%2B"}[special]
auth = "'minio','minio123',"
bucket = started_cluster.minio_restricted_bucket
instance = started_cluster.instances["dummy"]
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = [
[12549, 2463, 19893],
[64021, 38652, 66703],
[81611, 39650, 83516],
[11079, 59507, 61546],
[51764, 69952, 6876],
[41165, 90293, 29095],
[40167, 78432, 48309],
[81629, 81327, 11855],
[55852, 21643, 98507],
[6738, 54643, 41155],
]
values_csv = (
"\n".join((",".join(map(str, row)) for row in values)) + "\n"
).encode()
filename = f"get_file_with_{special}_{symbol}two.csv"
put_s3_file_content(started_cluster, bucket, filename, values_csv)
get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}_{urlsafe_symbol}two.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert [
list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()
] == values
get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}*.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert [
list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()
] == values
get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}_{urlsafe_symbol}*.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert [
list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()
] == values
@pytest.mark.parametrize("special", ["space", "plus", "plus2"])
def test_get_path_with_special(started_cluster, special):
symbol = {"space": "%20", "plus": "%2B", "plus2": "%2B"}[special]
safe_symbol = {"space": "%20", "plus": "+", "plus2": "%2B"}[special]
auth = "'minio','minio123',"
table_format = "column1 String"
instance = started_cluster.instances["dummy"]
get_query = f"SELECT * FROM s3('http://resolver:8082/get-my-path/{safe_symbol}.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert run_query(instance, get_query).splitlines() == [f"/{symbol}.csv"]
# Test put no data to S3.
@pytest.mark.parametrize("auth", [pytest.param("'minio','minio123',", id="minio")])
def test_empty_put(started_cluster, auth):
# type: (ClickHouseCluster, str) -> None
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
drop_empty_table_query = "DROP TABLE IF EXISTS empty_table"
create_empty_table_query = """
CREATE TABLE empty_table (
{}
) ENGINE = Null()
""".format(
table_format
)
run_query(instance, drop_empty_table_query)
run_query(instance, create_empty_table_query)
filename = "empty_put_test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') select * from empty_table".format(
started_cluster.minio_ip,
MINIO_INTERNAL_PORT,
bucket,
filename,
auth,
table_format,
)
run_query(instance, put_query)
try:
run_query(
instance,
"select count(*) from s3('http://{}:{}/{}/{}', {}'CSV', '{}')".format(
started_cluster.minio_ip,
MINIO_INTERNAL_PORT,
bucket,
filename,
auth,
table_format,
),
)
assert False, "Query should be failed."
except helpers.client.QueryRuntimeException as e:
        assert str(e).find("The specified key does not exist") != -1
# Test put values in CSV format.
@pytest.mark.parametrize(
"maybe_auth,positive",
[
pytest.param("", True, id="positive"),
pytest.param("'minio','minio123',", True, id="auth_positive"),
pytest.param("'wrongid','wrongkey',", False, id="negative"),
],
)
def test_put_csv(started_cluster, maybe_auth, positive):
    # type: (ClickHouseCluster, str, bool) -> None
bucket = (
started_cluster.minio_bucket
if not maybe_auth
else started_cluster.minio_restricted_bucket
)
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV settings s3_truncate_on_insert=1".format(
started_cluster.minio_ip,
MINIO_INTERNAL_PORT,
bucket,
filename,
maybe_auth,
table_format,
)
csv_data = "8,9,16\n11,18,13\n22,14,2\n"
try:
run_query(instance, put_query, stdin=csv_data)
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
assert csv_data == get_s3_file_content(started_cluster, bucket, filename)
# Test put and get with S3 server redirect.
def test_put_get_with_redirect(started_cluster):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
values_csv = "1,1,1\n1,1,1\n11,11,11\n"
filename = "test.csv"
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values settings s3_truncate_on_insert=1 {}".format(
started_cluster.minio_redirect_host,
started_cluster.minio_redirect_port,
bucket,
filename,
table_format,
values,
)
run_query(instance, query)
assert values_csv == get_s3_file_content(started_cluster, bucket, filename)
query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/{}', 'CSV', '{}')".format(
started_cluster.minio_redirect_host,
started_cluster.minio_redirect_port,
bucket,
filename,
table_format,
)
stdout = run_query(instance, query)
assert list(map(str.split, stdout.splitlines())) == [
["1", "1", "1", "1"],
["1", "1", "1", "1"],
["11", "11", "11", "1331"],
]
# Test put with restricted S3 server redirect.
def test_put_with_zero_redirect(started_cluster):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["s3_max_redirects"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
filename = "test.csv"
# Should work without redirect
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values settings s3_truncate_on_insert=1 {}".format(
started_cluster.minio_ip,
MINIO_INTERNAL_PORT,
bucket,
filename,
table_format,
values,
)
run_query(instance, query)
# Should not work with redirect
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values settings s3_truncate_on_insert=1 {}".format(
started_cluster.minio_redirect_host,
started_cluster.minio_redirect_port,
bucket,
filename,
table_format,
values,
)
exception_raised = False
try:
run_query(instance, query)
except Exception as e:
assert str(e).find("Too many redirects while trying to access") != -1
exception_raised = True
finally:
assert exception_raised
def test_put_get_with_globs(started_cluster):
# type: (ClickHouseCluster) -> None
unique_prefix = random.randint(1, 10000)
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
max_path = ""
for i in range(10):
for j in range(10):
path = "{}/{}_{}/{}.csv".format(
unique_prefix, i, random.choice(["a", "b", "c", "d"]), j
)
max_path = max(path, max_path)
values = "({},{},{})".format(i, j, i + j)
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
started_cluster.minio_ip,
MINIO_INTERNAL_PORT,
bucket,
path,
table_format,
values,
)
run_query(instance, query)
query = "select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from s3('http://{}:{}/{}/{}/*_{{a,b,c,d}}/%3f.csv', 'CSV', '{}')".format(
started_cluster.minio_redirect_host,
started_cluster.minio_redirect_port,
bucket,
unique_prefix,
table_format,
)
assert run_query(instance, query).splitlines() == [
"450\t450\t900\t0.csv\t{bucket}/{max_path}".format(
bucket=bucket, max_path=max_path
)
]
minio = started_cluster.minio_client
for obj in list(
minio.list_objects(
started_cluster.minio_bucket,
prefix="{}/".format(unique_prefix),
recursive=True,
)
):
minio.remove_object(started_cluster.minio_bucket, obj.object_name)
# Test multipart put.
@pytest.mark.parametrize(
"maybe_auth,positive",
[
pytest.param("", True, id="positive"),
pytest.param("'wrongid','wrongkey'", False, id="negative"),
# ("'minio','minio123',",True), Redirect with credentials not working with nginx.
],
)
def test_multipart_put(started_cluster, maybe_auth, positive):
    # type: (ClickHouseCluster, str, bool) -> None
bucket = (
started_cluster.minio_bucket
if not maybe_auth
else started_cluster.minio_restricted_bucket
)
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    # Minimum size of a part is 5 MiB for MinIO.
# See: https://github.com/minio/minio/blob/master/docs/minio-limits.md
min_part_size_bytes = 5 * 1024 * 1024
csv_size_bytes = int(min_part_size_bytes * 1.5) # To have 2 parts.
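    # Editor's note on the arithmetic (not in the original test): 1.5 * 5 MiB is
    # 7.5 MiB of CSV, which a multipart upload splits into one full 5 MiB part
    # plus a ~2.5 MiB tail part, hence the ">= 2" PUT-request assertion against
    # the proxy logs below.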
one_line_length = 6 # 3 digits, 2 commas, 1 line separator.
# Generate data having size more than one part
int_data = [[1, 2, 3] for i in range(csv_size_bytes // one_line_length)]
csv_data = "".join(["{},{},{}\n".format(x, y, z) for x, y, z in int_data])
assert len(csv_data) > min_part_size_bytes
filename = "test_multipart.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format(
started_cluster.minio_redirect_host,
started_cluster.minio_redirect_port,
bucket,
filename,
maybe_auth,
table_format,
)
try:
run_query(
instance,
put_query,
stdin=csv_data,
settings={
"s3_min_upload_part_size": min_part_size_bytes,
"s3_max_single_part_upload_size": 0,
},
)
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
# Use proxy access logs to count number of parts uploaded to Minio.
proxy_logs = started_cluster.get_container_logs("proxy1") # type: str
assert proxy_logs.count("PUT /{}/{}".format(bucket, filename)) >= 2
assert csv_data == get_s3_file_content(started_cluster, bucket, filename)
def test_remote_host_filter(started_cluster):
instance = started_cluster.instances["restricted_dummy"]
format = "column1 UInt32, column2 UInt32, column3 UInt32"
query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/test.csv', 'CSV', '{}')".format(
"invalid_host", MINIO_INTERNAL_PORT, started_cluster.minio_bucket, format
)
assert "not allowed in configuration file" in instance.query_and_get_error(query)
other_values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
query = "insert into table function s3('http://{}:{}/{}/test.csv', 'CSV', '{}') values {}".format(
"invalid_host",
MINIO_INTERNAL_PORT,
started_cluster.minio_bucket,
format,
other_values,
)
assert "not allowed in configuration file" in instance.query_and_get_error(query)
def test_wrong_s3_syntax(started_cluster):
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
expected_err_msg = "Code: 42" # NUMBER_OF_ARGUMENTS_DOESNT_MATCH
query = "create table test_table_s3_syntax (id UInt32) ENGINE = S3('', '', '', '', '', '')"
assert expected_err_msg in instance.query_and_get_error(query)
expected_err_msg = "Code: 36" # BAD_ARGUMENTS
query = "create table test_table_s3_syntax (id UInt32) ENGINE = S3('')"
assert expected_err_msg in instance.query_and_get_error(query)
# https://en.wikipedia.org/wiki/One_Thousand_and_One_Nights
def test_s3_glob_scheherazade(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
max_path = ""
values = "(1, 1, 1)"
nights_per_job = 1001 // 30
jobs = []
for night in range(0, 1001, nights_per_job):
def add_tales(start, end):
for i in range(start, end):
path = "night_{}/tale.csv".format(i)
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
started_cluster.minio_ip,
MINIO_INTERNAL_PORT,
bucket,
path,
table_format,
values,
)
run_query(instance, query)
jobs.append(
threading.Thread(
target=add_tales, args=(night, min(night + nights_per_job, 1001))
)
)
jobs[-1].start()
for job in jobs:
job.join()
query = "select count(), sum(column1), sum(column2), sum(column3) from s3('http://{}:{}/{}/night_*/tale.csv', 'CSV', '{}')".format(
started_cluster.minio_redirect_host,
started_cluster.minio_redirect_port,
bucket,
table_format,
)
assert run_query(instance, query).splitlines() == ["1001\t1001\t1001\t1001"]
def run_s3_mocks(started_cluster):
logging.info("Starting s3 mocks")
mocks = (
("mock_s3.py", "resolver", "8080"),
("unstable_server.py", "resolver", "8081"),
("echo.py", "resolver", "8082"),
)
for mock_filename, container, port in mocks:
container_id = started_cluster.get_container_id(container)
current_dir = os.path.dirname(__file__)
started_cluster.copy_file_to_container(
container_id,
os.path.join(current_dir, "s3_mocks", mock_filename),
mock_filename,
)
started_cluster.exec_in_container(
container_id, ["python", mock_filename, port], detach=True
)
# Wait for S3 mocks to start
for mock_filename, container, port in mocks:
num_attempts = 100
for attempt in range(num_attempts):
ping_response = started_cluster.exec_in_container(
started_cluster.get_container_id(container),
["curl", "-s", f"http://localhost:{port}/"],
nothrow=True,
)
if ping_response != "OK":
if attempt == num_attempts - 1:
assert ping_response == "OK", 'Expected "OK", but got "{}"'.format(
ping_response
)
else:
time.sleep(1)
else:
logging.debug(
f"mock {mock_filename} ({port}) answered {ping_response} on attempt {attempt}"
)
break
logging.info("S3 mocks started")
def replace_config(old, new):
    with open(CONFIG_PATH, "r") as config:
        config_lines = config.readlines()
    config_lines = [line.replace(old, new) for line in config_lines]
    with open(CONFIG_PATH, "w") as config:
        config.writelines(config_lines)
def test_custom_auth_headers(started_cluster):
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = "select * from s3('http://resolver:8080/{bucket}/{file}', 'CSV', '{table_format}')".format(
bucket=started_cluster.minio_restricted_bucket,
file=filename,
table_format=table_format,
)
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
result = run_query(instance, get_query)
assert result == "1\t2\t3\n"
instance.query("DROP TABLE IF EXISTS test")
instance.query(
"CREATE TABLE test ({table_format}) ENGINE = S3('http://resolver:8080/{bucket}/{file}', 'CSV')".format(
bucket=started_cluster.minio_restricted_bucket,
file=filename,
table_format=table_format,
)
)
assert run_query(instance, "SELECT * FROM test") == "1\t2\t3\n"
replace_config(
"<header>Authorization: Bearer TOKEN",
"<header>Authorization: Bearer INVALID_TOKEN",
)
instance.query("SYSTEM RELOAD CONFIG")
ret, err = instance.query_and_get_answer_with_error("SELECT * FROM test")
assert ret == "" and err != ""
replace_config(
"<header>Authorization: Bearer INVALID_TOKEN",
"<header>Authorization: Bearer TOKEN",
)
instance.query("SYSTEM RELOAD CONFIG")
assert run_query(instance, "SELECT * FROM test") == "1\t2\t3\n"
instance.query("DROP TABLE test")
def test_custom_auth_headers_exclusion(started_cluster):
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = f"SELECT * FROM s3('http://resolver:8080/{started_cluster.minio_restricted_bucket}/restricteddirectory/{filename}', 'CSV', '{table_format}')"
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
with pytest.raises(helpers.client.QueryRuntimeException) as ei:
result = run_query(instance, get_query)
print(result)
assert ei.value.returncode == 243
assert "Forbidden Error" in ei.value.stderr
def test_infinite_redirect(started_cluster):
bucket = "redirected"
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = f"select * from s3('http://resolver:{started_cluster.minio_redirect_port}/{bucket}/{filename}', 'CSV', '{table_format}')"
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
exception_raised = False
try:
run_query(instance, get_query)
except Exception as e:
assert str(e).find("Too many redirects while trying to access") != -1
exception_raised = True
finally:
assert exception_raised
@pytest.mark.parametrize(
"extension,method",
[
pytest.param("bin", "gzip", id="bin"),
pytest.param("gz", "auto", id="gz"),
],
)
def test_storage_s3_get_gzip(started_cluster, extension, method):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
filename = f"test_get_gzip.{extension}"
name = f"test_get_gzip_{extension}"
data = [
"Sophia Intrieri,55",
"Jack Taylor,71",
"Christopher Silva,66",
"Clifton Purser,35",
"Richard Aceuedo,43",
"Lisa Hensley,31",
"Alice Wehrley,1",
"Mary Farmer,47",
"Samara Ramirez,19",
"Shirley Lloyd,51",
"Santos Cowger,0",
"Richard Mundt,88",
"Jerry Gonzalez,15",
"Angela James,10",
"Norman Ortega,33",
"",
]
run_query(instance, f"DROP TABLE IF EXISTS {name}")
buf = io.BytesIO()
compressed = gzip.GzipFile(fileobj=buf, mode="wb")
compressed.write(("\n".join(data)).encode())
compressed.close()
put_s3_file_content(started_cluster, bucket, filename, buf.getvalue())
run_query(
instance,
f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3(
'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{filename}',
'CSV',
'{method}')""",
)
run_query(instance, f"SELECT sum(id) FROM {name}").splitlines() == ["565"]
run_query(instance, f"DROP TABLE {name}")
def test_storage_s3_get_unstable(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
table_format = "column1 Int64, column2 Int64, column3 Int64, column4 Int64"
get_query = f"SELECT count(), sum(column3), sum(column4) FROM s3('http://resolver:8081/{started_cluster.minio_bucket}/test.csv', 'CSV', '{table_format}') FORMAT CSV"
result = run_query(instance, get_query)
assert result.splitlines() == ["500001,500000,0"]
def test_storage_s3_put_uncompressed(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
filename = "test_put_uncompressed.bin"
name = "test_put_uncompressed"
data = [
"'Gloria Thompson',99",
"'Matthew Tang',98",
"'Patsy Anderson',23",
"'Nancy Badillo',93",
"'Roy Hunt',5",
"'Adam Kirk',51",
"'Joshua Douds',28",
"'Jolene Ryan',0",
"'Roxanne Padilla',50",
"'Howard Roberts',41",
"'Ricardo Broughton',13",
"'Roland Speer',83",
"'Cathy Cohan',58",
"'Kathie Dawson',100",
"'Gregg Mcquistion',11",
]
run_query(
instance,
"CREATE TABLE {} (name String, id UInt32) ENGINE = S3('http://{}:{}/{}/{}', 'CSV')".format(
name, started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename
),
)
run_query(instance, "INSERT INTO {} VALUES ({})".format(name, "),(".join(data)))
run_query(instance, "SELECT sum(id) FROM {}".format(name)).splitlines() == ["753"]
uncompressed_content = get_s3_file_content(started_cluster, bucket, filename)
assert sum([int(i.split(",")[1]) for i in uncompressed_content.splitlines()]) == 753
@pytest.mark.parametrize(
"extension,method",
[pytest.param("bin", "gzip", id="bin"), pytest.param("gz", "auto", id="gz")],
)
def test_storage_s3_put_gzip(started_cluster, extension, method):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
filename = f"test_put_gzip.{extension}"
name = f"test_put_gzip_{extension}"
data = [
"'Joseph Tomlinson',5",
"'Earnest Essary',44",
"'Matha Pannell',24",
"'Michael Shavers',46",
"'Elias Groce',38",
"'Pamela Bramlet',50",
"'Lewis Harrell',49",
"'Tamara Fyall',58",
"'George Dixon',38",
"'Alice Walls',49",
"'Paula Mais',24",
"'Myrtle Pelt',93",
"'Sylvia Naffziger',18",
"'Amanda Cave',83",
"'Yolanda Joseph',89",
]
run_query(
instance,
f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3(
'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{filename}',
'CSV',
'{method}')""",
)
run_query(instance, f"INSERT INTO {name} VALUES ({'),('.join(data)})")
run_query(instance, f"SELECT sum(id) FROM {name}").splitlines() == ["708"]
buf = io.BytesIO(
get_s3_file_content(started_cluster, bucket, filename, decode=False)
)
f = gzip.GzipFile(fileobj=buf, mode="rb")
uncompressed_content = f.read().decode()
assert sum([int(i.split(",")[1]) for i in uncompressed_content.splitlines()]) == 708
def test_truncate_table(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
name = "truncate"
instance.query(
"CREATE TABLE {} (id UInt32) ENGINE = S3('http://{}:{}/{}/{}', 'CSV')".format(
name, started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, name
)
)
instance.query("INSERT INTO {} SELECT number FROM numbers(10)".format(name))
result = instance.query("SELECT * FROM {}".format(name))
assert result == instance.query("SELECT number FROM numbers(10)")
instance.query("TRUNCATE TABLE {}".format(name))
minio = started_cluster.minio_client
timeout = 30
while timeout > 0:
if (
len(list(minio.list_objects(started_cluster.minio_bucket, "truncate/")))
== 0
):
return
timeout -= 1
time.sleep(1)
assert len(list(minio.list_objects(started_cluster.minio_bucket, "truncate/"))) == 0
assert instance.query("SELECT * FROM {}".format(name)) == ""
def test_predefined_connection_configuration(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
name = "test_table"
instance.query("drop table if exists {}".format(name))
instance.query(
"CREATE TABLE {} (id UInt32) ENGINE = S3(s3_conf1, format='CSV')".format(name)
)
instance.query("INSERT INTO {} SELECT number FROM numbers(10)".format(name))
result = instance.query("SELECT * FROM {}".format(name))
assert result == instance.query("SELECT number FROM numbers(10)")
result = instance.query(
"SELECT * FROM s3(s3_conf1, format='CSV', structure='id UInt32')"
)
assert result == instance.query("SELECT number FROM numbers(10)")
result = ""
def test_url_reconnect_in_the_middle(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
table_format = "id String, data String"
filename = "test_url_reconnect_{}.tsv".format(random.randint(0, 1000))
instance.query(
f"""insert into table function
s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'TSV', '{table_format}')
select number, randomPrintableASCII(number % 1000) from numbers(1000000)"""
)
with PartitionManager() as pm:
pm_rule_reject = {
"probability": 0.02,
"destination": instance.ip_address,
"source_port": started_cluster.minio_port,
"action": "REJECT --reject-with tcp-reset",
}
pm_rule_drop_all = {
"destination": instance.ip_address,
"source_port": started_cluster.minio_port,
"action": "DROP",
}
pm._add_rule(pm_rule_reject)
def select():
global result
result = instance.query(
f"""select sum(cityHash64(x)) from (select toUInt64(id) + sleep(0.1) as x from
url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'TSV', '{table_format}')
settings http_max_tries = 10, http_retry_max_backoff_ms=2000, http_send_timeout=1, http_receive_timeout=1)"""
)
assert int(result) == 3914219105369203805
thread = threading.Thread(target=select)
thread.start()
time.sleep(4)
pm._add_rule(pm_rule_drop_all)
time.sleep(2)
pm._delete_rule(pm_rule_drop_all)
pm._delete_rule(pm_rule_reject)
thread.join()
assert int(result) == 3914219105369203805
def test_seekable_formats(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_function = f"s3(s3_parquet, structure='a Int32, b String', format='Parquet')"
instance.query(
f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000) settings s3_truncate_on_insert=1"
)
result = instance.query(f"SELECT count() FROM {table_function}")
assert int(result) == 5000000
table_function = f"s3(s3_orc, structure='a Int32, b String', format='ORC')"
exec_query_with_retry(
instance,
f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000) settings s3_truncate_on_insert=1",
)
result = instance.query(f"SELECT count() FROM {table_function}")
assert int(result) == 5000000
instance.query("SYSTEM FLUSH LOGS")
result = instance.query(
f"SELECT formatReadableSize(memory_usage) FROM system.query_log WHERE startsWith(query, 'SELECT count() FROM s3') AND memory_usage > 0 ORDER BY event_time desc"
)
result = result[: result.index(".")]
assert int(result) < 200
def test_seekable_formats_url(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
table_function = f"s3(s3_parquet, structure='a Int32, b String', format='Parquet')"
instance.query(
f"insert into table function {table_function} select number, randomString(100) from numbers(5000000) settings s3_truncate_on_insert=1"
)
table_function = f"url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_parquet', 'Parquet', 'a Int32, b String')"
result = instance.query(f"SELECT count() FROM {table_function}")
assert int(result) == 5000000
table_function = f"s3(s3_orc, structure='a Int32, b String', format='ORC')"
exec_query_with_retry(
instance,
f"insert into table function {table_function} select number, randomString(100) from numbers(5000000) settings s3_truncate_on_insert=1",
)
table_function = f"url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_orc', 'ORC', 'a Int32, b String')"
result = instance.query(f"SELECT count() FROM {table_function}")
assert int(result) == 5000000
instance.query("SYSTEM FLUSH LOGS")
result = instance.query(
f"SELECT formatReadableSize(memory_usage) FROM system.query_log WHERE startsWith(query, 'SELECT count() FROM url') AND memory_usage > 0 ORDER BY event_time desc"
)
result = result[: result.index(".")]
assert int(result) < 200
def test_empty_file(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
name = "empty"
url = f"http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{name}"
minio = started_cluster.minio_client
minio.put_object(bucket, name, io.BytesIO(b""), 0)
table_function = f"s3('{url}', 'CSV', 'id Int32')"
result = instance.query(f"SELECT count() FROM {table_function}")
assert int(result) == 0
def test_insert_with_path_with_globs(started_cluster):
instance = started_cluster.instances["dummy"]
table_function_3 = f"s3('http://minio1:9001/root/test_parquet*', 'minio', 'minio123', 'Parquet', 'a Int32, b String')"
instance.query_and_get_error(
f"insert into table function {table_function_3} SELECT number, randomString(100) FROM numbers(500)"
)
def test_s3_schema_inference(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
instance.query(
f"insert into table function s3(s3_native, structure='a Int32, b String', format='Native') select number, randomString(100) from numbers(5000000)"
)
result = instance.query(f"desc s3(s3_native, format='Native')")
assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n"
result = instance.query(f"select count(*) from s3(s3_native, format='Native')")
assert int(result) == 5000000
instance.query(
f"create table schema_inference engine=S3(s3_native, format='Native')"
)
result = instance.query(f"desc schema_inference")
assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n"
result = instance.query(f"select count(*) from schema_inference")
assert int(result) == 5000000
table_function = f"url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_native', 'Native')"
result = instance.query(f"desc {table_function}")
assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n"
result = instance.query(f"select count(*) from {table_function}")
assert int(result) == 5000000
instance.query(
f"create table schema_inference_2 engine=URL('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_native', 'Native')"
)
result = instance.query(f"desc schema_inference_2")
assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n"
result = instance.query(f"select count(*) from schema_inference_2")
assert int(result) == 5000000
table_function = f"s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_native', 'Native')"
result = instance.query(f"desc {table_function}")
assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n"
result = instance.query(f"select count(*) from {table_function}")
assert int(result) == 5000000
def test_overwrite(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
table_function = f"s3(s3_parquet, structure='a Int32, b String', format='Parquet')"
instance.query(f"create table test_overwrite as {table_function}")
instance.query(f"truncate table test_overwrite")
instance.query(
f"insert into test_overwrite select number, randomString(100) from numbers(50) settings s3_truncate_on_insert=1"
)
instance.query_and_get_error(
f"insert into test_overwrite select number, randomString(100) from numbers(100)"
)
instance.query(
f"insert into test_overwrite select number, randomString(100) from numbers(200) settings s3_truncate_on_insert=1"
)
result = instance.query(f"select count() from test_overwrite")
assert int(result) == 200
def test_create_new_files_on_insert(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
table_function = f"s3(s3_parquet, structure='a Int32, b String', format='Parquet')"
instance.query(f"create table test_multiple_inserts as {table_function}")
instance.query(f"truncate table test_multiple_inserts")
instance.query(
f"insert into test_multiple_inserts select number, randomString(100) from numbers(10) settings s3_truncate_on_insert=1"
)
instance.query(
f"insert into test_multiple_inserts select number, randomString(100) from numbers(20) settings s3_create_new_file_on_insert=1"
)
instance.query(
f"insert into test_multiple_inserts select number, randomString(100) from numbers(30) settings s3_create_new_file_on_insert=1"
)
result = instance.query(f"select count() from test_multiple_inserts")
assert int(result) == 60
instance.query(f"drop table test_multiple_inserts")
table_function = (
f"s3(s3_parquet_gz, structure='a Int32, b String', format='Parquet')"
)
instance.query(f"create table test_multiple_inserts as {table_function}")
instance.query(f"truncate table test_multiple_inserts")
instance.query(
f"insert into test_multiple_inserts select number, randomString(100) from numbers(10) settings s3_truncate_on_insert=1"
)
instance.query(
f"insert into test_multiple_inserts select number, randomString(100) from numbers(20) settings s3_create_new_file_on_insert=1"
)
instance.query(
f"insert into test_multiple_inserts select number, randomString(100) from numbers(30) settings s3_create_new_file_on_insert=1"
)
result = instance.query(f"select count() from test_multiple_inserts")
assert int(result) == 60
def test_format_detection(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
instance.query(f"create table arrow_table_s3 (x UInt64) engine=S3(s3_arrow)")
instance.query(f"insert into arrow_table_s3 select 1")
result = instance.query(f"select * from s3(s3_arrow)")
assert int(result) == 1
result = instance.query(
f"select * from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow')"
)
assert int(result) == 1
result = instance.query(
f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow')"
)
assert int(result) == 1
instance.query(f"create table parquet_table_s3 (x UInt64) engine=S3(s3_parquet2)")
instance.query(f"insert into parquet_table_s3 select 1")
result = instance.query(f"select * from s3(s3_parquet2)")
assert int(result) == 1
result = instance.query(
f"select * from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.parquet')"
)
assert int(result) == 1
result = instance.query(
f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.parquet')"
)
assert int(result) == 1
def test_schema_inference_from_globs(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
instance.query(
f"insert into table function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test1.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL"
)
instance.query(
f"insert into table function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test2.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select 0"
)
url_filename = "test{1,2}.jsoncompacteachrow"
result = instance.query(
f"desc url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{url_filename}')"
)
assert result.strip() == "c1\tNullable(Float64)"
result = instance.query(
f"select * from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{url_filename}')"
)
assert sorted(result.split()) == ["0", "\\N"]
result = instance.query(
f"desc s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test*.jsoncompacteachrow')"
)
assert result.strip() == "c1\tNullable(Float64)"
result = instance.query(
f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test*.jsoncompacteachrow')"
)
assert sorted(result.split()) == ["0", "\\N"]
def test_signatures(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
instance.query(f"create table test_signatures (x UInt64) engine=S3(s3_arrow)")
instance.query(f"truncate table test_signatures")
instance.query(f"insert into test_signatures select 1")
result = instance.query(
f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow')"
)
assert int(result) == 1
result = instance.query(
f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'Arrow', 'x UInt64')"
)
assert int(result) == 1
result = instance.query(
f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'minio', 'minio123')"
)
assert int(result) == 1
result = instance.query(
f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'Arrow', 'x UInt64', 'auto')"
)
assert int(result) == 1
result = instance.query(
f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'minio', 'minio123', 'Arrow')"
)
assert int(result) == 1
def test_select_columns(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
name = "test_table2"
structure = "id UInt32, value1 Int32, value2 Int32"
instance.query(f"drop table if exists {name}")
instance.query(
f"CREATE TABLE {name} ({structure}) ENGINE = S3(s3_conf1, format='Parquet')"
)
limit = 10000000
instance.query(
f"INSERT INTO {name} SELECT * FROM generateRandom('{structure}') LIMIT {limit} SETTINGS s3_truncate_on_insert=1"
)
instance.query(f"SELECT value2 FROM {name}")
instance.query("SYSTEM FLUSH LOGS")
result1 = instance.query(
f"SELECT read_bytes FROM system.query_log WHERE type='QueryFinish' and query LIKE 'SELECT value2 FROM {name}'"
)
instance.query(f"SELECT * FROM {name}")
instance.query("SYSTEM FLUSH LOGS")
result2 = instance.query(
f"SELECT read_bytes FROM system.query_log WHERE type='QueryFinish' and query LIKE 'SELECT * FROM {name}'"
)
assert int(result1) * 3 <= int(result2)
def test_insert_select_schema_inference(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
instance.query(
f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_insert_select.native') select toUInt64(1) as x"
)
result = instance.query(
f"desc s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_insert_select.native')"
)
assert result.strip() == "x\tUInt64"
result = instance.query(
f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_insert_select.native')"
)
assert int(result) == 1
|
utils.py
|
"""Utilities shared by tests."""
import asyncio
import collections
import contextlib
import io
import logging
import os
import re
import selectors
import socket
import socketserver
import sys
import tempfile
import threading
import time
import unittest
import weakref
from unittest import mock
from http.server import HTTPServer
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from asyncio import base_events
from asyncio import events
from asyncio import format_helpers
from asyncio import futures
from asyncio import tasks
from asyncio.log import logger
from test import support
def data_file(filename):
if hasattr(support, 'TEST_HOME_DIR'):
fullname = os.path.join(support.TEST_HOME_DIR, filename)
if os.path.isfile(fullname):
return fullname
fullname = os.path.join(os.path.dirname(__file__), '..', filename)
if os.path.isfile(fullname):
return fullname
raise FileNotFoundError(filename)
ONLYCERT = data_file('ssl_cert.pem')
ONLYKEY = data_file('ssl_key.pem')
SIGNED_CERTFILE = data_file('keycert3.pem')
SIGNING_CA = data_file('pycacert.pem')
PEERCERT = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Jul 7 14:23:16 2028 GMT',
'notBefore': 'Aug 29 14:23:16 2018 GMT',
'serialNumber': 'CB2D80995A69525C',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
def simple_server_sslcontext():
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(ONLYCERT, ONLYKEY)
server_context.check_hostname = False
server_context.verify_mode = ssl.CERT_NONE
return server_context
def simple_client_sslcontext(*, disable_verify=True):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.check_hostname = False
if disable_verify:
client_context.verify_mode = ssl.CERT_NONE
return client_context
def dummy_ssl_context():
if ssl is None:
return None
else:
return ssl.SSLContext(ssl.PROTOCOL_TLS)
def run_briefly(loop):
async def once():
pass
gen = once()
t = loop.create_task(gen)
# Don't log a warning if the task is not done after run_until_complete().
# It occurs if the loop is stopped or if a task raises a BaseException.
t._log_destroy_pending = False
try:
loop.run_until_complete(t)
finally:
gen.close()
def run_until(loop, pred, timeout=support.SHORT_TIMEOUT):
deadline = time.monotonic() + timeout
while not pred():
if timeout is not None:
timeout = deadline - time.monotonic()
if timeout <= 0:
raise futures.TimeoutError()
loop.run_until_complete(tasks.sleep(0.001))
def run_once(loop):
"""Legacy API to run once through the event loop.
This is the recommended pattern for test code. It will poll the
selector once and run all callbacks scheduled in response to I/O
events.
"""
loop.call_soon(loop.stop)
loop.run_forever()
class SilentWSGIRequestHandler(WSGIRequestHandler):
def get_stderr(self):
return io.StringIO()
def log_message(self, format, *args):
pass
class SilentWSGIServer(WSGIServer):
request_timeout = support.LOOPBACK_TIMEOUT
def get_request(self):
request, client_addr = super().get_request()
request.settimeout(self.request_timeout)
return request, client_addr
def handle_error(self, request, client_address):
pass
class SSLWSGIServerMixin:
def finish_request(self, request, client_address):
# The relative location of our test directory (which
# contains the ssl key and certificate files) differs
# between the stdlib and stand-alone asyncio.
# Prefer our own if we can find it.
context = ssl.SSLContext()
context.load_cert_chain(ONLYCERT, ONLYKEY)
ssock = context.wrap_socket(request, server_side=True)
try:
self.RequestHandlerClass(ssock, client_address, self)
ssock.close()
except OSError:
# maybe socket has been closed by peer
pass
class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer):
pass
def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls):
def loop(environ):
size = int(environ['CONTENT_LENGTH'])
while size:
data = environ['wsgi.input'].read(min(size, 0x10000))
yield data
size -= len(data)
def app(environ, start_response):
status = '200 OK'
headers = [('Content-type', 'text/plain')]
start_response(status, headers)
if environ['PATH_INFO'] == '/loop':
return loop(environ)
else:
return [b'Test message']
# Run the test WSGI server in a separate thread in order not to
# interfere with event handling in the main thread
server_class = server_ssl_cls if use_ssl else server_cls
httpd = server_class(address, SilentWSGIRequestHandler)
httpd.set_app(app)
httpd.address = httpd.server_address
server_thread = threading.Thread(
target=lambda: httpd.serve_forever(poll_interval=0.05))
server_thread.start()
try:
yield httpd
finally:
httpd.shutdown()
httpd.server_close()
server_thread.join()
if hasattr(socket, 'AF_UNIX'):
class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer):
def server_bind(self):
socketserver.UnixStreamServer.server_bind(self)
self.server_name = '127.0.0.1'
self.server_port = 80
class UnixWSGIServer(UnixHTTPServer, WSGIServer):
request_timeout = support.LOOPBACK_TIMEOUT
def server_bind(self):
UnixHTTPServer.server_bind(self)
self.setup_environ()
def get_request(self):
request, client_addr = super().get_request()
request.settimeout(self.request_timeout)
# Code in the stdlib expects that get_request
# will return a socket and a tuple (host, port).
# However, this isn't true for UNIX sockets,
# as the second return value will be a path;
# hence we return some fake data sufficient
# to get the tests going
return request, ('127.0.0.1', '')
class SilentUnixWSGIServer(UnixWSGIServer):
def handle_error(self, request, client_address):
pass
class UnixSSLWSGIServer(SSLWSGIServerMixin, SilentUnixWSGIServer):
pass
def gen_unix_socket_path():
with tempfile.NamedTemporaryFile() as file:
return file.name
@contextlib.contextmanager
def unix_socket_path():
path = gen_unix_socket_path()
try:
yield path
finally:
try:
os.unlink(path)
except OSError:
pass
@contextlib.contextmanager
def run_test_unix_server(*, use_ssl=False):
with unix_socket_path() as path:
yield from _run_test_server(address=path, use_ssl=use_ssl,
server_cls=SilentUnixWSGIServer,
server_ssl_cls=UnixSSLWSGIServer)
@contextlib.contextmanager
def run_test_server(*, host='127.0.0.1', port=0, use_ssl=False):
yield from _run_test_server(address=(host, port), use_ssl=use_ssl,
server_cls=SilentWSGIServer,
server_ssl_cls=SSLWSGIServer)
def make_test_protocol(base):
dct = {}
for name in dir(base):
if name.startswith('__') and name.endswith('__'):
# skip magic names
continue
dct[name] = MockCallback(return_value=None)
return type('TestProtocol', (base,) + base.__bases__, dct)()
class TestSelector(selectors.BaseSelector):
def __init__(self):
self.keys = {}
def register(self, fileobj, events, data=None):
key = selectors.SelectorKey(fileobj, 0, events, data)
self.keys[fileobj] = key
return key
def unregister(self, fileobj):
return self.keys.pop(fileobj)
def select(self, timeout):
return []
def get_map(self):
return self.keys
class TestLoop(base_events.BaseEventLoop):
"""Loop for unittests.
It manages self time directly.
If something scheduled to be executed later then
on next loop iteration after all ready handlers done
generator passed to __init__ is calling.
Generator should be like this:
def gen():
...
when = yield ...
... = yield time_advance
Value returned by yield is absolute time of next scheduled handler.
Value passed to yield is time advance to move loop's time forward.
"""
def __init__(self, gen=None):
super().__init__()
if gen is None:
def gen():
yield
self._check_on_close = False
else:
self._check_on_close = True
self._gen = gen()
next(self._gen)
self._time = 0
self._clock_resolution = 1e-9
self._timers = []
self._selector = TestSelector()
self.readers = {}
self.writers = {}
self.reset_counters()
self._transports = weakref.WeakValueDictionary()
def time(self):
return self._time
def advance_time(self, advance):
"""Move test time forward."""
if advance:
self._time += advance
def close(self):
super().close()
if self._check_on_close:
try:
self._gen.send(0)
except StopIteration:
pass
else: # pragma: no cover
raise AssertionError("Time generator is not finished")
def _add_reader(self, fd, callback, *args):
self.readers[fd] = events.Handle(callback, args, self, None)
def _remove_reader(self, fd):
self.remove_reader_count[fd] += 1
if fd in self.readers:
del self.readers[fd]
return True
else:
return False
def assert_reader(self, fd, callback, *args):
if fd not in self.readers:
raise AssertionError(f'fd {fd} is not registered')
handle = self.readers[fd]
if handle._callback != callback:
raise AssertionError(
f'unexpected callback: {handle._callback} != {callback}')
if handle._args != args:
raise AssertionError(
f'unexpected callback args: {handle._args} != {args}')
def assert_no_reader(self, fd):
if fd in self.readers:
raise AssertionError(f'fd {fd} is registered')
def _add_writer(self, fd, callback, *args):
self.writers[fd] = events.Handle(callback, args, self, None)
def _remove_writer(self, fd):
self.remove_writer_count[fd] += 1
if fd in self.writers:
del self.writers[fd]
return True
else:
return False
def assert_writer(self, fd, callback, *args):
assert fd in self.writers, 'fd {} is not registered'.format(fd)
handle = self.writers[fd]
assert handle._callback == callback, '{!r} != {!r}'.format(
handle._callback, callback)
assert handle._args == args, '{!r} != {!r}'.format(
handle._args, args)
def _ensure_fd_no_transport(self, fd):
if not isinstance(fd, int):
try:
fd = int(fd.fileno())
except (AttributeError, TypeError, ValueError):
# This code matches selectors._fileobj_to_fd function.
raise ValueError("Invalid file object: "
"{!r}".format(fd)) from None
try:
transport = self._transports[fd]
except KeyError:
pass
else:
raise RuntimeError(
'File descriptor {!r} is used by transport {!r}'.format(
fd, transport))
def add_reader(self, fd, callback, *args):
"""Add a reader callback."""
self._ensure_fd_no_transport(fd)
return self._add_reader(fd, callback, *args)
def remove_reader(self, fd):
"""Remove a reader callback."""
self._ensure_fd_no_transport(fd)
return self._remove_reader(fd)
def add_writer(self, fd, callback, *args):
"""Add a writer callback.."""
self._ensure_fd_no_transport(fd)
return self._add_writer(fd, callback, *args)
def remove_writer(self, fd):
"""Remove a writer callback."""
self._ensure_fd_no_transport(fd)
return self._remove_writer(fd)
def reset_counters(self):
self.remove_reader_count = collections.defaultdict(int)
self.remove_writer_count = collections.defaultdict(int)
def _run_once(self):
super()._run_once()
for when in self._timers:
advance = self._gen.send(when)
self.advance_time(advance)
self._timers = []
def call_at(self, when, callback, *args, context=None):
self._timers.append(when)
return super().call_at(when, callback, *args, context=context)
def _process_events(self, event_list):
return
def _write_to_self(self):
pass
def MockCallback(**kwargs):
return mock.Mock(spec=['__call__'], **kwargs)
class MockPattern(str):
"""A regex based str with a fuzzy __eq__.
Use this helper with 'mock.assert_called_with', or anywhere
where a regex comparison between strings is needed.
For instance:
mock_call.assert_called_with(MockPattern('spam.*ham'))
"""
def __eq__(self, other):
return bool(re.search(str(self), other, re.S))
class MockInstanceOf:
def __init__(self, type):
self._type = type
def __eq__(self, other):
return isinstance(other, self._type)
def get_function_source(func):
source = format_helpers._get_function_source(func)
if source is None:
raise ValueError("unable to get the source of %r" % (func,))
return source
class TestCase(unittest.TestCase):
@staticmethod
def close_loop(loop):
if loop._default_executor is not None:
if not loop.is_closed():
loop.run_until_complete(loop.shutdown_default_executor())
else:
loop._default_executor.shutdown(wait=True)
loop.close()
policy = support.maybe_get_event_loop_policy()
if policy is not None:
try:
watcher = policy.get_child_watcher()
except NotImplementedError:
# watcher is not implemented by EventLoopPolicy, e.g. Windows
pass
else:
if isinstance(watcher, asyncio.ThreadedChildWatcher):
threads = list(watcher._threads.values())
for thread in threads:
thread.join()
def set_event_loop(self, loop, *, cleanup=True):
assert loop is not None
# ensure that the event loop is passed explicitly in asyncio
events.set_event_loop(None)
if cleanup:
self.addCleanup(self.close_loop, loop)
def new_test_loop(self, gen=None):
loop = TestLoop(gen)
self.set_event_loop(loop)
return loop
def unpatch_get_running_loop(self):
events._get_running_loop = self._get_running_loop
def setUp(self):
self._get_running_loop = events._get_running_loop
events._get_running_loop = lambda: None
self._thread_cleanup = support.threading_setup()
def tearDown(self):
self.unpatch_get_running_loop()
events.set_event_loop(None)
# Detect CPython bug #23353: ensure that yield/yield-from is not used
# in an except block of a generator
self.assertEqual(sys.exc_info(), (None, None, None))
self.doCleanups()
support.threading_cleanup(*self._thread_cleanup)
support.reap_children()
@contextlib.contextmanager
def disable_logger():
"""Context manager to disable asyncio logger.
For example, it can be used to ignore warnings in debug mode.
"""
old_level = logger.level
try:
logger.setLevel(logging.CRITICAL+1)
yield
finally:
logger.setLevel(old_level)
def mock_nonblocking_socket(proto=socket.IPPROTO_TCP, type=socket.SOCK_STREAM,
family=socket.AF_INET):
"""Create a mock of a non-blocking socket."""
sock = mock.MagicMock(socket.socket)
sock.proto = proto
sock.type = type
sock.family = family
sock.gettimeout.return_value = 0.0
return sock
|
ui.py
|
__author__ = 'benoit'
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
import multiprocessing, time, threading
from .game import *
from .solver import *
class SolitaireUI:
def __init__(self, solitaire):
self.solitaire = solitaire
self.board = self.solitaire.board
self.fig, self.ax = plt.subplots()
        # Position the buttons and the text (axes coordinate system)
        self.btnUndo = plt.axes([0.7, 0.91, 0.1, 0.05])
        self.btnGoToStart = plt.axes([0.80, 0.91, 0.05, 0.05])
        self.btnBack = plt.axes([0.85, 0.91, 0.05, 0.05])
        self.btnNext = plt.axes([0.90, 0.91, 0.05, 0.05])
        self.btnGotoEnd = plt.axes([0.95, 0.91, 0.05, 0.05])
self.btnSearch = plt.axes([0, 0.91, 0.1, 0.05])
self.btnStopsearching = plt.axes([0.2, 0.91, 0.2, 0.05])
self.btnSave = plt.axes([0, 0, 0.1, 0.05])
self.btnLoad = plt.axes([0.2, 0, 0.1, 0.05])
self.solver = None
self.possibleMoves = [] #list of possible Moves that are drawn
self.possibleMovesPoints = [] #reference to the possible moves points on the canvas
        self.movePoints = []
self.plotIndex = 0
self.startPoint = 0
def show(self):
self.ax.clear()
self.plotIndex = 0
self.title = self.ax.text(1,1, 'Solitaire ')
self.title.set_backgroundcolor('w')
# set axes
self.ax.set_xlim(0,len(self.board[:,0]))
self.ax.set_ylim(0,len(self.board[0]))
self.ax.set_xticks(range(0,len(self.board[:,0])))
self.ax.set_yticks(range(0,len(self.board[0])))
#grid
pts = self.solitaire.getGrid()
x = list([p[0] for p in pts])
y = list([p[1] for p in pts])
self.ax.scatter(x,y,s=1, color='g', linestyle='-')
# points
pts = self.solitaire.getStartingPawns()
x = list([p[0] for p in pts])
y = list([p[1] for p in pts])
self.ax.scatter(x,y, s=50, color='b', linestyle='-')
self.plotPossibleMoves()
# draw all lines
if len(self.solitaire.moves) > 0 :
for m in self.solitaire.moves:
self.plotMove(m)
self.fig.canvas.mpl_connect('button_press_event', self.onButtonPressed)
self.fig.canvas.mpl_connect('button_release_event', self.onButtonReleased)
bundo = Button(self.btnUndo, 'Undo')
bundo.on_clicked(self.undo)
btgotoend = Button(self.btnGotoEnd, '>>')
btgotoend.on_clicked(self.navigateEnd)
bnext = Button(self.btnNext, '>')
bnext.on_clicked(self.navigateNext)
bback = Button(self.btnBack, '<')
bback.on_clicked(self.navigateBack)
btgotostart = Button(self.btnGoToStart, '<<')
btgotostart.on_clicked(self.navigateStart)
bsearch = Button(self.btnSearch, 'Search')
bsearch.on_clicked(self.getBestGame)
bstop = Button(self.btnStopsearching, 'Stop Searching')
bstop.on_clicked(self.stopSolver)
bsave = Button(self.btnSave, 'Save')
bsave.on_clicked(self.saveGame)
bload = Button(self.btnLoad, 'Load')
bload.on_clicked(self.loadGame)
plt.show()
def getBestGame(self, event):
self.solitaire.starttime = dt.datetime.now()
self.startPoint = len(self.solitaire.moves)
        if self.solver is None:
self.solver = Solver(self.solitaire)
else:
self.solver.hasBeenSearched.remove(self.solitaire.getHash())
self.solver = Solver(self.solitaire, self.solver.hasBeenSearched)
thread = threading.Thread(target=self.solver.solve)
thread.start()
print('check thread')
def stopSolver(self, event):
if self.solver:
self.solver.stopSolver()
print('stopping the solver')
self.solitaire = self.solver.bestGame
self.ax.clear()
self.show()
def onButtonPressed(self, event):
"""
        'button_press_event' MouseEvent - a mouse button is pressed.
        Record the board coordinates of the point starting a line;
        the move is completed when the button is released.
:return:
"""
if event.xdata and event.ydata:
self.currentStartPoint =(int(round(event.xdata)),int(round(event.ydata)))
def onButtonReleased(self, event):
"""
Retrieve x,y coordinates of the second point and try to play a move.
        If the move is not valid, it is ignored.
:return:
"""
if self.plotIndex != len(self.solitaire.moves):
print('cannot play while navigating the game')
return
if event.xdata and event.ydata:
currentEndPoint = (int(round(event.xdata)),int(round(event.ydata)))
print(currentEndPoint)
newline = self.solitaire.addMove(self.currentStartPoint, currentEndPoint)
if newline:
self.plotMove(self.solitaire.moves[-1]) #last line
def plotMove(self, move):
"""
        Plot a move: the newly filled point in red, plus its line.
        :param Move move:
"""
if self.possibleMovesPoints:
self.cleanPossibleMoves()
#plot point + line
scat = self.ax.scatter(move.point[0], move.point[1], color='r')
self.movePoints.append(scat)
self.plotIndex += 1
self.ax.plot((move.line.p1[0], move.line.p2[0]), (move.line.p1[1], move.line.p2[1]), color='r', linestyle='-')
s = 'Moves: ' +str(self.plotIndex)+'/'+ str(len(self.solitaire.moves))
self.title.set_text(s)
if self.plotIndex == len(self.solitaire.moves):
self.plotPossibleMoves()
plt.draw()
def undo(self,event):
if self.plotIndex != len(self.solitaire.moves):
print('cannot undo if navigating the game')
return
self.cleanPossibleMoves()
m = self.solitaire.undoLastMove()
self.plotIndex -= 1
#remove line
self.ax.get_lines().pop().remove()
self.movePoints.pop().remove()
for pt in m.line.points:
if self.solitaire.board[pt[0],pt[1]] == 0:
self.ax.scatter(pt[0], pt[1], s=1, color='g')
s = 'Moves: ' +str(self.plotIndex)+'/'+ str(len(self.solitaire.moves))
self.title.set_text(s)
self.plotPossibleMoves()
plt.draw()
def unplotMove(self,m):
"""
:param Move m:
:return:
"""
self.cleanPossibleMoves()
#remove line
self.ax.get_lines().pop().remove()
self.movePoints.pop().remove()
for pt in m.line.points:
if self.solitaire.board[pt[0],pt[1]] == 0:
self.ax.scatter(pt[0], pt[1], s=1, color='g')
self.plotIndex -= 1
s = 'Moves: ' +str(self.plotIndex)+'/'+ str(len(self.solitaire.moves))
self.title.set_text(s)
plt.draw()
def cleanPossibleMoves(self):
#erase previous possible moves
for m in self.possibleMovesPoints:
try:
m.remove()
except:
pass
self.possibleMovesPoints = []
def plotPossibleMoves(self):
#plot possible moves
self.possibleMoves = self.solitaire.getPossibleMoves()
for move in self.possibleMoves:
pt = self.ax.scatter(move.point[0], move.point[1], color='tomato', s=8)
self.possibleMovesPoints.append(pt)
plt.draw()
def saveGame(self, event):
self.solitaire.saveToFile()
def loadGame(self, event):
new_solitaire = self.solitaire.loadFromFile('data/game.dmp')
self.solitaire = new_solitaire
self.show()
def navigateNext(self, event):
if self.plotIndex < len(self.solitaire.moves):
self.plotMove(self.solitaire.moves[self.plotIndex])
def navigateBack(self, event):
if self.plotIndex >0:
self.unplotMove(self.solitaire.moves[self.plotIndex-1])
def navigateEnd(self, event):
while self.plotIndex < len(self.solitaire.moves):
self.plotMove(self.solitaire.moves[self.plotIndex])
def navigateStart(self, event):
while self.plotIndex > self.startPoint:
self.unplotMove(self.solitaire.moves[self.plotIndex-1])
self.startPoint = 0
|
jerk_agent_collect.py
|
#!/usr/bin/env python
"""
A scripted agent called "Just Enough Retained Knowledge".
"""
import argparse
import json
import multiprocessing
import os
import random
from uuid import uuid4
import cv2
import gym
import numpy as np
import retro
from tqdm import trange
from sonicrl.environments import get_environments
EXPLOIT_BIAS = 0.25
TOTAL_TIMESTEPS = int(1e6)
BACKTRACK_REWARD_THRESHOLD = 0
class ObservationSaver:
def __init__(self, output_path, frame_format, image_directory):
self._outfile = open(output_path, 'w')
self._frame_format = frame_format
self._image_directory = image_directory
self.num_saved = 0
def save(self, episode_id, game, state, observation, action, reward, done):
uuid = uuid4().hex
frame_name = self._frame_format.format(game=game, state=state, uuid=uuid)
        frame_path = os.path.join(self._image_directory, frame_name)
line = dict(episode_id=episode_id, game=game, state=state, image_id=frame_name,
reward=reward, action=action.tolist(), done=done)
self._outfile.write(json.dumps(line) + '\n')
bgr = cv2.cvtColor(observation, cv2.COLOR_RGB2BGR)
cv2.imwrite(frame_path, bgr)
self.num_saved += 1
def __del__(self):
self._outfile.close()
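# A minimal, illustrative sketch (never called) of using ObservationSaver on its own.
# The file names, the dummy frame and the 12-button action vector are assumptions for
# the example only; the 'frames' directory must already exist for cv2.imwrite to work.
def _example_observation_saver_usage():
    saver = ObservationSaver('metadata.jsonl', '{game}_{state}_{uuid}.jpg', 'frames')
    frame = np.zeros((224, 320, 3), dtype=np.uint8)   # dummy RGB observation
    action = np.zeros((12,), dtype=bool)              # no buttons pressed
    saver.save('episode-0', 'SomeGame', 'SomeState', frame, action, reward=0.0, done=False)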
def main(observation_queue, observations_per_env, environment_args):
"""Run JERK on the attached environment."""
new_ep = True
observation_buffer = []
env = None
while True:
if new_ep:
if env is not None:
env.close()
episode_id = uuid4().hex
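            # Pick the environment that has produced the fewest observations so far,
            # so collection stays roughly balanced across games/states.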
min_obs = min(observations_per_env.values())
env_idx = random.choice([i for i, count in observations_per_env.items() if count == min_obs])
args = environment_args[env_idx]
current_game = args['game']
current_state = args['state']
env = retro.make(**args)
env = gym.wrappers.TimeLimit(env, max_episode_steps=18000)
_ = env.reset()
reward, new_ep = move(env, 100, observation_buffer)
if not new_ep and reward <= BACKTRACK_REWARD_THRESHOLD:
reward, new_ep = move(env, 70, observation_buffer, left=True)
while len(observation_buffer[0]) == 4:
obs, reward, done, action = observation_buffer.pop(0)
observation_queue.put((episode_id, current_game, current_state, obs, reward, done, action))
observations_per_env[env_idx] += 1
def move(env, num_steps, buffer, left=False, jump_prob=1.0 / 40.0, jump_repeat=4):
"""
Move right or left for a certain number of steps,
jumping periodically.
"""
total_rew = 0.0
done = False
steps_taken = 0
jumping_steps_left = 0
while not done and steps_taken < num_steps:
        action = np.zeros((12,), dtype=bool)
action[6] = left
action[7] = not left
if jumping_steps_left > 0:
action[0] = True
jumping_steps_left -= 1
else:
if random.random() < jump_prob:
jumping_steps_left = jump_repeat - 1
action[0] = True
if buffer:
buffer[-1].append(action)
obs, rew, done, _ = env.step(action)
# env.render(mode='human')
buffer.append([obs, rew, done])
total_rew += rew
steps_taken += 1
if done:
break
return total_rew, done
def exploit(env, sequence):
"""
    Replay an action sequence; pad with NOPs if needed.
    Returns the final cumulative reward (assumes the env tracks `total_reward`,
    e.g. via a wrapper that accumulates rewards).
"""
env.reset()
done = False
idx = 0
while not done:
if idx >= len(sequence):
action = np.zeros((12,), dtype='bool')
else:
action = sequence[idx]
_, _, done, _ = env.step(action)
# env.render(mode='human')
idx += 1
return env.total_reward
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('output_file', help='file to store metadata')
parser.add_argument('image_directory', help='directory in which to store frames')
parser.add_argument('--environments', nargs='+',
help='one or more environment csvs describing environment game and states (e.g. train/val)')
parser.add_argument('--verbose', '-v', action='count', default=1,
help='increase verbosity (can be specified multiple times)')
parser.add_argument('--quiet', '-q', action='count', default=0,
help='decrease verbosity (can be specified multiple times)')
args = parser.parse_args()
environments = []
for env_file in args.environments:
environments.extend(get_environments(env_file))
manager = multiprocessing.Manager()
observations_per_env = manager.dict()
for i in range(len(environments)):
observations_per_env[i] = 0
observation_queue = multiprocessing.Queue(maxsize=30000)
for i in range(2):
worker = multiprocessing.Process(target=main, args=(observation_queue, observations_per_env, environments))
worker.daemon = True
worker.start()
frame_format = '{game}_{state}_{uuid}.jpg'
saver = ObservationSaver(args.output_file, frame_format, args.image_directory)
for _ in trange(4500000):
episode_id, current_game, current_state, obs, reward, done, action = observation_queue.get()
saver.save(episode_id, current_game, current_state, obs, action, reward, done)
|
utils.py
|
# -*- coding: utf-8 -*-
import contextlib
import errno
import importlib
import itertools
import json
import os
import queue
import sys
from atomicwrites import atomic_write
import click
import click_threading
from . import cli_logger
from .. import BUGTRACKER_HOME, DOCS_HOME, exceptions
from ..sync.exceptions import IdentConflict, PartialSync, StorageEmpty, \
SyncConflict
from ..sync.status import SqliteStatus
from ..utils import expand_path, get_storage_init_args
STATUS_PERMISSIONS = 0o600
STATUS_DIR_PERMISSIONS = 0o700
class _StorageIndex(object):
def __init__(self):
self._storages = dict(
caldav='vdirsyncer.storage.dav.CalDAVStorage',
carddav='vdirsyncer.storage.dav.CardDAVStorage',
filesystem='vdirsyncer.storage.filesystem.FilesystemStorage',
http='vdirsyncer.storage.http.HttpStorage',
singlefile='vdirsyncer.storage.singlefile.SingleFileStorage',
google_calendar='vdirsyncer.storage.google.GoogleCalendarStorage',
google_contacts='vdirsyncer.storage.google.GoogleContactsStorage'
)
def __getitem__(self, name):
item = self._storages[name]
if not isinstance(item, str):
return item
modname, clsname = item.rsplit('.', 1)
mod = importlib.import_module(modname)
self._storages[name] = rv = getattr(mod, clsname)
assert rv.storage_name == name
return rv
storage_names = _StorageIndex()
del _StorageIndex
class JobFailed(RuntimeError):
pass
def handle_cli_error(status_name=None, e=None):
'''
Print a useful error message for the current exception.
This is supposed to catch all exceptions, and should never raise any
exceptions itself.
'''
try:
if e is not None:
raise e
else:
raise
except exceptions.UserError as e:
cli_logger.critical(e)
except StorageEmpty as e:
cli_logger.error(
'{status_name}: Storage "{name}" was completely emptied. If you '
'want to delete ALL entries on BOTH sides, then use '
'`vdirsyncer sync --force-delete {status_name}`. '
'Otherwise delete the files for {status_name} in your status '
'directory.'.format(
name=e.empty_storage.instance_name,
status_name=status_name
)
)
except PartialSync as e:
cli_logger.error(
'{status_name}: Attempted change on {storage}, which is read-only'
'. Set `partial_sync` in your pair section to `ignore` to ignore '
'those changes, or `revert` to revert them on the other side.'
.format(status_name=status_name, storage=e.storage)
)
except SyncConflict as e:
cli_logger.error(
'{status_name}: One item changed on both sides. Resolve this '
'conflict manually, or by setting the `conflict_resolution` '
'parameter in your config file.\n'
'See also {docs}/config.html#pair-section\n'
'Item ID: {e.ident}\n'
'Item href on side A: {e.href_a}\n'
'Item href on side B: {e.href_b}\n'
.format(status_name=status_name, e=e, docs=DOCS_HOME)
)
except IdentConflict as e:
cli_logger.error(
'{status_name}: Storage "{storage.instance_name}" contains '
'multiple items with the same UID or even content. Vdirsyncer '
'will now abort the synchronization of this collection, because '
            'the fix for this is not clear; it could be the result of a badly '
'behaving server. You can try running:\n\n'
' vdirsyncer repair {storage.instance_name}\n\n'
'But make sure to have a backup of your data in some form. The '
'offending hrefs are:\n\n{href_list}\n'
.format(status_name=status_name,
storage=e.storage,
href_list='\n'.join(map(repr, e.hrefs)))
)
except (click.Abort, KeyboardInterrupt, JobFailed):
pass
except exceptions.PairNotFound as e:
cli_logger.error(
'Pair {pair_name} does not exist. Please check your '
'configuration file and make sure you\'ve typed the pair name '
'correctly'.format(pair_name=e.pair_name)
)
except exceptions.InvalidResponse as e:
cli_logger.error(
'The server returned something vdirsyncer doesn\'t understand. '
'Error message: {!r}\n'
'While this is most likely a serverside problem, the vdirsyncer '
'devs are generally interested in such bugs. Please report it in '
'the issue tracker at {}'
.format(e, BUGTRACKER_HOME)
)
except exceptions.CollectionRequired as e:
cli_logger.error(
'One or more storages don\'t support `collections = null`. '
'You probably want to set `collections = ["from a", "from b"]`.'
)
except Exception as e:
tb = sys.exc_info()[2]
import traceback
tb = traceback.format_tb(tb)
if status_name:
msg = 'Unknown error occurred for {}'.format(status_name)
else:
msg = 'Unknown error occurred'
msg += ': {}\nUse `-vdebug` to see the full traceback.'.format(e)
cli_logger.error(msg)
cli_logger.debug(''.join(tb))
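# A minimal sketch (the `job` callable is hypothetical) of the call pattern this helper
# is written for: run some work and let handle_cli_error() turn whatever it raised into
# a readable log message instead of a raw traceback.
def _example_handle_cli_error_usage(job, status_name=None):
    try:
        job()
    except Exception:
        handle_cli_error(status_name)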
def get_status_name(pair, collection):
if collection is None:
return pair
return pair + '/' + collection
def get_status_path(base_path, pair, collection=None, data_type=None):
assert data_type is not None
status_name = get_status_name(pair, collection)
path = expand_path(os.path.join(base_path, status_name))
if os.path.isfile(path) and data_type == 'items':
new_path = path + '.items'
# XXX: Legacy migration
cli_logger.warning('Migrating statuses: Renaming {} to {}'
.format(path, new_path))
os.rename(path, new_path)
path += '.' + data_type
return path
def load_status(base_path, pair, collection=None, data_type=None):
path = get_status_path(base_path, pair, collection, data_type)
if not os.path.exists(path):
return None
assert_permissions(path, STATUS_PERMISSIONS)
with open(path) as f:
try:
return dict(json.load(f))
except ValueError:
pass
return {}
def prepare_status_path(path):
dirname = os.path.dirname(path)
try:
os.makedirs(dirname, STATUS_DIR_PERMISSIONS)
except OSError as e:
if e.errno != errno.EEXIST:
raise
@contextlib.contextmanager
def manage_sync_status(base_path, pair_name, collection_name):
path = get_status_path(base_path, pair_name, collection_name, 'items')
status = None
legacy_status = None
try:
# XXX: Legacy migration
with open(path, 'rb') as f:
if f.read(1) == b'{':
f.seek(0)
# json.load doesn't work on binary files for Python 3.4/3.5
legacy_status = dict(json.loads(f.read().decode('utf-8')))
except (OSError, IOError, ValueError):
pass
if legacy_status is not None:
cli_logger.warning('Migrating legacy status to sqlite')
os.remove(path)
status = SqliteStatus(path)
status.load_legacy_status(legacy_status)
else:
prepare_status_path(path)
status = SqliteStatus(path)
yield status
def save_status(base_path, pair, collection=None, data_type=None, data=None):
assert data_type is not None
assert data is not None
status_name = get_status_name(pair, collection)
path = expand_path(os.path.join(base_path, status_name)) + '.' + data_type
prepare_status_path(path)
with atomic_write(path, mode='w', overwrite=True) as f:
json.dump(data, f)
os.chmod(path, STATUS_PERMISSIONS)
def storage_class_from_config(config):
config = dict(config)
if 'type' not in config:
raise exceptions.UserError('Missing parameter "type"')
storage_name = config.pop('type')
try:
cls = storage_names[storage_name]
except KeyError:
raise exceptions.UserError(
'Unknown storage type: {}'.format(storage_name))
return cls, config
def storage_instance_from_config(config, create=True):
'''
:param config: A configuration dictionary to pass as kwargs to the class
corresponding to config['type']
'''
cls, new_config = storage_class_from_config(config)
try:
return cls(**new_config)
except exceptions.CollectionNotFound as e:
if create:
config = handle_collection_not_found(
config, config.get('collection', None), e=str(e))
return storage_instance_from_config(config, create=False)
else:
raise
except Exception:
return handle_storage_init_error(cls, new_config)
def handle_storage_init_error(cls, config):
e = sys.exc_info()[1]
if not isinstance(e, TypeError) or '__init__' not in repr(e):
raise
all, required = get_storage_init_args(cls)
given = set(config)
missing = required - given
invalid = given - all
problems = []
if missing:
problems.append(
u'{} storage requires the parameters: {}'
.format(cls.storage_name, u', '.join(missing)))
if invalid:
problems.append(
u'{} storage doesn\'t take the parameters: {}'
.format(cls.storage_name, u', '.join(invalid)))
if not problems:
raise e
raise exceptions.UserError(
u'Failed to initialize {}'.format(config['instance_name']),
problems=problems
)
class WorkerQueue(object):
'''
A simple worker-queue setup.
Note that workers quit if queue is empty. That means you have to first put
things into the queue before spawning the worker!
'''
def __init__(self, max_workers):
self._queue = queue.Queue()
self._workers = []
self._max_workers = max_workers
self._shutdown_handlers = []
        # According to http://stackoverflow.com/a/27062830, these counters are
        # thread-safe, unlike incrementing a plain integer variable.
self.num_done_tasks = itertools.count()
self.num_failed_tasks = itertools.count()
def shutdown(self):
while self._shutdown_handlers:
try:
self._shutdown_handlers.pop()()
except Exception:
pass
def _worker(self):
while True:
try:
func = self._queue.get(False)
except queue.Empty:
break
try:
func(wq=self)
except Exception:
handle_cli_error()
next(self.num_failed_tasks)
finally:
self._queue.task_done()
next(self.num_done_tasks)
if not self._queue.unfinished_tasks:
self.shutdown()
def spawn_worker(self):
if self._max_workers and len(self._workers) >= self._max_workers:
return
t = click_threading.Thread(target=self._worker)
t.start()
self._workers.append(t)
@contextlib.contextmanager
def join(self):
assert self._workers or not self._queue.unfinished_tasks
ui_worker = click_threading.UiWorker()
self._shutdown_handlers.append(ui_worker.shutdown)
_echo = click.echo
with ui_worker.patch_click():
yield
if not self._workers:
# Ugly hack, needed because ui_worker is not running.
click.echo = _echo
cli_logger.critical('Nothing to do.')
sys.exit(5)
ui_worker.run()
self._queue.join()
for worker in self._workers:
worker.join()
tasks_failed = next(self.num_failed_tasks)
tasks_done = next(self.num_done_tasks)
if tasks_failed > 0:
cli_logger.error('{} out of {} tasks failed.'
.format(tasks_failed, tasks_done))
sys.exit(1)
def put(self, f):
return self._queue.put(f)
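# A rough, illustrative sketch (never called) of the ordering the class docstring
# insists on: enter join() first, then enqueue work before spawning each worker.
# The two no-op jobs are assumptions for the example; every job receives the queue
# via the `wq` keyword argument.
def _example_worker_queue_usage():
    wq = WorkerQueue(max_workers=2)
    with wq.join():
        for _ in range(2):
            wq.put(lambda wq: None)  # enqueue before spawning, or the worker exits early
            wq.spawn_worker()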
def assert_permissions(path, wanted):
permissions = os.stat(path).st_mode & 0o777
if permissions > wanted:
cli_logger.warning('Correcting permissions of {} from {:o} to {:o}'
.format(path, permissions, wanted))
os.chmod(path, wanted)
def handle_collection_not_found(config, collection, e=None):
storage_name = config.get('instance_name', None)
cli_logger.warning('{}No collection {} found for storage {}.'
.format('{}\n'.format(e) if e else '',
json.dumps(collection), storage_name))
if click.confirm('Should vdirsyncer attempt to create it?'):
storage_type = config['type']
cls, config = storage_class_from_config(config)
config['collection'] = collection
try:
args = cls.create_collection(**config)
args['type'] = storage_type
return args
except NotImplementedError as e:
cli_logger.error(e)
raise exceptions.UserError(
'Unable to find or create collection "{collection}" for '
'storage "{storage}". Please create the collection '
'yourself.'.format(collection=collection,
storage=storage_name))
|
api_chess.py
|
from multiprocessing import connection, Pipe
from threading import Thread
import numpy as np
from chess_zero.config import Config
class ChessModelAPI:
# noinspection PyUnusedLocal
def __init__(self, config: Config, agent_model): # ChessModel
self.agent_model = agent_model
self.pipes = []
def start(self):
prediction_worker = Thread(target=self.predict_batch_worker, name="prediction_worker")
prediction_worker.daemon = True
prediction_worker.start()
def get_pipe(self):
me, you = Pipe()
self.pipes.append(me)
return you
def predict_batch_worker(self):
while True:
            ready = connection.wait(self.pipes, timeout=0.001)
if not ready:
continue
data, result_pipes = [], []
for pipe in ready:
while pipe.poll():
data.append(pipe.recv())
result_pipes.append(pipe)
data = np.asarray(data, dtype=np.float32)
policy_ary, value_ary = self.agent_model.model.predict_on_batch(data)
for pipe, p, v in zip(result_pipes, policy_ary, value_ary):
pipe.send((p, float(v)))
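# A rough sketch (the model and the input-plane shape are assumptions) of how a
# self-play worker is expected to talk to ChessModelAPI: after api.start() has been
# called, send one observation over its pipe and receive the batched (policy, value)
# prediction back.
def _example_pipe_usage(api: ChessModelAPI, observation: np.ndarray):
    pipe = api.get_pipe()
    pipe.send(observation)       # a single state, e.g. stacked input planes
    policy, value = pipe.recv()  # answered by predict_batch_worker()
    return policy, value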
|
authenticator.py
|
"""Module for authenticating devices connecting to a faucet network"""
import sys
import os
import collections
import argparse
import threading
import yaml
from forch.heartbeat_scheduler import HeartbeatScheduler
import forch.radius_query as radius_query
from forch.simple_auth_state_machine import AuthStateMachine
from forch.utils import get_logger, proto_dict, dict_proto, ConfigError
from forch.proto.devices_state_pb2 import DevicePlacement
from forch.proto.authentication_pb2 import AuthResult
from forch.proto.forch_configuration_pb2 import OrchestrationConfig
HEARTBEAT_INTERVAL_SEC = 3
class Authenticator:
"""Authenticate devices using MAB/dot1x"""
def __init__(self, auth_config, auth_callback=None,
radius_query_object=None, metrics=None):
self.radius_query = None
self.sessions = {}
self._sessions_lock = threading.Lock()
self.auth_callback = auth_callback
self._metrics = metrics
self._logger = get_logger('auth')
radius_info = auth_config.radius_info
radius_ip = radius_info.server_ip
radius_port = radius_info.server_port
source_port = radius_info.source_port
if radius_info.radius_secret_helper:
secret = os.popen(radius_info.radius_secret_helper).read().strip()
else:
secret = None
if not (radius_ip and radius_port and secret):
self._logger.warning(
                'Invalid radius_info in config. Radius IP: %s; Radius port: %s; Secret present: %s',
radius_ip, radius_port, bool(secret))
raise ConfigError
Socket = collections.namedtuple(
'Socket', 'source_ip, source_port, server_ip, server_port')
socket_info = Socket('0.0.0.0', source_port, radius_ip, radius_port)
if radius_query_object:
self.radius_query = radius_query_object
else:
self.radius_query = radius_query.RadiusQuery(
socket_info, secret, self.process_radius_result)
threading.Thread(target=self.radius_query.receive_radius_messages, daemon=True).start()
interval = auth_config.heartbeat_sec or HEARTBEAT_INTERVAL_SEC
self.auth_config = auth_config
self.timer = HeartbeatScheduler(interval)
self.timer.add_callback(self.handle_sm_timeout)
self.timer.start()
self._logger.info(
'Created Authenticator module with radius IP %s and port %s.', radius_ip, radius_port)
def process_auth_result(self):
"""Prints Auth example object to out"""
base_dir = os.getenv('FORCH_CONFIG_DIR')
auth_ex_file = os.path.join(base_dir, 'auth_result.yaml')
auth_list = None
with open(auth_ex_file, 'r') as stream:
try:
auth_list = yaml.safe_load(stream).get('auth_list')
except yaml.YAMLError as exc:
self._logger.error("Error loading yaml file: %s", exc, exc_info=True)
for auth_obj in auth_list:
auth_example = dict_proto(auth_obj, AuthResult)
sys.stdout.write(str(proto_dict(auth_example)) + '\n')
def stop(self):
"""Stop state machine timer"""
if self.timer:
self.timer.stop()
def do_mab_request(self, src_mac, port_id):
"""Initiate MAB request"""
self._logger.info('sending MAB request for %s', src_mac)
self.radius_query.send_mab_request(src_mac, port_id)
def process_device_placement(self, src_mac, device_placement):
"""Process device placement info and initiate mab query"""
portid_hash = ((device_placement.switch + str(device_placement.port)).encode('utf-8')).hex()
port_id = int(portid_hash[:6], 16)
with self._sessions_lock:
if src_mac not in self.sessions:
self.sessions[src_mac] = AuthStateMachine(
src_mac, port_id, self.auth_config,
self.radius_query.send_mab_request,
self.process_session_result, metrics=self._metrics)
if device_placement.connected:
self.sessions[src_mac].host_learned()
elif not device_placement.connected:
self.sessions[src_mac].host_expired()
self.sessions.pop(src_mac)
def process_radius_result(self, src_mac, code, segment, role):
"""Process RADIUS result from radius_query"""
self._logger.info(
"Received RADIUS result for src_mac %s: %s, %s, %s",
src_mac, code, segment, role)
if self._metrics:
self._metrics.inc_var('radius_query_responses')
if code == radius_query.INVALID_RESP:
self._logger.warning("Received invalid response for src_mac: %s", src_mac)
return
if src_mac not in self.sessions:
self._logger.warning("Session doesn't exist for src_mac:%s", src_mac)
return
with self._sessions_lock:
if code == radius_query.ACCEPT:
if self._metrics:
self._metrics.inc_var('radius_query_accepts')
self.sessions[src_mac].received_radius_accept(segment, role)
else:
if self._metrics:
self._metrics.inc_var('radius_query_rejects')
self.sessions[src_mac].received_radius_reject()
def process_session_result(self, src_mac, access, segment=None, role=None):
"""Process session result"""
if self.auth_callback:
self.auth_callback(src_mac, access, segment, role)
def handle_sm_timeout(self):
"""Call timeout handlers for all active session state machines"""
with self._sessions_lock:
for session in self.sessions.values():
session.handle_sm_timer()
def parse_args(raw_args):
"""Parse sys args"""
parser = argparse.ArgumentParser(prog='authenticator', description='authenticator')
parser.add_argument('-s', '--server-ip', type=str, default='0.0.0.0',
help='RADIUS server ip')
parser.add_argument('-p', '--server-port', type=int, default=1812,
help='Server port that remote radius server is listening on')
parser.add_argument('-l', '--source-port', type=int, default=0,
help='Port to listen on for RADIUS responses')
parser.add_argument('-r', '--radius-secret', type=str, default='echo SECRET',
help='Command that prints RADIUS server secret')
parser.add_argument('-m', '--src_mac', type=str, default='8e:00:00:00:01:02',
help='MAC addr to authenticate')
parser.add_argument('-i', '--port-id', type=int, default=12345,
help='Unique identifier for physical port device is on')
parser.add_argument('--mab', action='store_true')
return parser.parse_args(raw_args)
if __name__ == '__main__':
class MockRadiusQuery():
"""Class mocking RadiusQuery"""
def __init__(self):
self._last_mac_query = None
def send_mab_request(self, src_mac, port_id):
"""mock RADIUS request"""
self._last_mac_query = src_mac
sys.stdout.write('RADIUS request for %s\n' % (src_mac))
def receive_radius_messages(self):
"""mock receive_radius_messages"""
def get_last_mac_queried(self):
"""Get last queried mac address and clear"""
mac = self._last_mac_query
self._last_mac_query = None
return mac
EXPECTED_MAB_RESULT = {}
def mock_auth_callback(src_mac, access, segment, role):
"""Mocks auth callback passed to Authenticator"""
mab_result = EXPECTED_MAB_RESULT.get(src_mac, {})
assert mab_result.get('segment') == segment and mab_result.get('role') == role
sys.stdout.write('auth_callback for %s: access:%s segment:%s role:%s\n'
% (src_mac, access, segment, role))
ARGS = parse_args(sys.argv[1:])
AUTH_CONFIG = dict_proto(
{
'radius_info': {
'server_ip': ARGS.server_ip,
'server_port': ARGS.server_port,
'source_port': ARGS.source_port,
'radius_secret_helper': f'echo {ARGS.radius_secret}'
}
},
OrchestrationConfig.AuthConfig
)
MOCK_RADIUS_QUERY = MockRadiusQuery()
if ARGS.mab:
AUTHENTICATOR = Authenticator(AUTH_CONFIG, mock_auth_callback)
AUTHENTICATOR.do_mab_request(ARGS.src_mac, ARGS.port_id)
        input('Press Enter to exit.')
else:
# test radius query call for device placement
AUTHENTICATOR = Authenticator(AUTH_CONFIG, mock_auth_callback, MOCK_RADIUS_QUERY)
TEST_MAC = '00:aa:bb:cc:dd:ee'
DEV_PLACEMENT = DevicePlacement(switch='t2s2', port=1, connected=True)
AUTHENTICATOR.process_device_placement(TEST_MAC, DEV_PLACEMENT)
assert MOCK_RADIUS_QUERY.get_last_mac_queried() == TEST_MAC
# test positive RADIUS response
CODE = radius_query.ACCEPT
SEGMENT = 'test'
ROLE = 'test'
EXPECTED_MAB_RESULT[TEST_MAC] = {
'segment': SEGMENT,
'role': ROLE
}
AUTHENTICATOR.process_radius_result(TEST_MAC, CODE, SEGMENT, ROLE)
EXPECTED_MAB_RESULT.pop(TEST_MAC)
|
process_replay.py
|
#!/usr/bin/env python3
import importlib
import os
import sys
import threading
import time
import signal
from collections import namedtuple
import capnp
from tqdm import tqdm
import cereal.messaging as messaging
from cereal import car, log
from cereal.services import service_list
from common.params import Params
from common.timeout import Timeout
from selfdrive.car.fingerprints import FW_VERSIONS
from selfdrive.car.car_helpers import get_car, interfaces
from selfdrive.manager.process import PythonProcess
from selfdrive.manager.process_config import managed_processes
# NumPy gives different results based on CPU features after version 1.19
NUMPY_TOLERANCE = 1e-7
CI = "CI" in os.environ
TIMEOUT = 15
ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'init_callback', 'should_recv_callback', 'tolerance', 'fake_pubsubmaster'])
def wait_for_event(evt):
if not evt.wait(TIMEOUT):
        if threading.current_thread().name == "MainThread":
# tested process likely died. don't let test just hang
raise Exception("Timeout reached. Tested process likely crashed.")
else:
# done testing this process, let it die
sys.exit(0)
class FakeSocket:
def __init__(self, wait=True):
self.data = []
self.wait = wait
self.recv_called = threading.Event()
self.recv_ready = threading.Event()
def receive(self, non_blocking=False):
if non_blocking:
return None
if self.wait:
self.recv_called.set()
wait_for_event(self.recv_ready)
self.recv_ready.clear()
return self.data.pop()
def send(self, data):
if self.wait:
wait_for_event(self.recv_called)
self.recv_called.clear()
self.data.append(data)
if self.wait:
self.recv_ready.set()
def wait_for_recv(self):
wait_for_event(self.recv_called)
class DumbSocket:
def __init__(self, s=None):
if s is not None:
try:
dat = messaging.new_message(s)
except capnp.lib.capnp.KjException: # pylint: disable=c-extension-no-member
# lists
dat = messaging.new_message(s, 0)
self.data = dat.to_bytes()
def receive(self, non_blocking=False):
return self.data
def send(self, dat):
pass
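# FakeSubMaster and FakePubMaster below synchronize with the process under test through
# threading.Event handshakes: the replay thread feeds messages via update_msgs() and
# collects output via wait_for_msg(), while the tested process blocks in update()/send()
# until the matching event fires, keeping the replay deterministic.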
class FakeSubMaster(messaging.SubMaster):
def __init__(self, services):
super(FakeSubMaster, self).__init__(services, addr=None)
self.sock = {s: DumbSocket(s) for s in services}
self.update_called = threading.Event()
self.update_ready = threading.Event()
self.wait_on_getitem = False
def __getitem__(self, s):
# hack to know when fingerprinting is done
if self.wait_on_getitem:
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
return self.data[s]
def update(self, timeout=-1):
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
def update_msgs(self, cur_time, msgs):
wait_for_event(self.update_called)
self.update_called.clear()
super(FakeSubMaster, self).update_msgs(cur_time, msgs)
self.update_ready.set()
def wait_for_update(self):
wait_for_event(self.update_called)
class FakePubMaster(messaging.PubMaster):
def __init__(self, services): # pylint: disable=super-init-not-called
self.data = {}
self.sock = {}
self.last_updated = None
for s in services:
try:
data = messaging.new_message(s)
except capnp.lib.capnp.KjException:
data = messaging.new_message(s, 0)
self.data[s] = data.as_reader()
self.sock[s] = DumbSocket()
self.send_called = threading.Event()
self.get_called = threading.Event()
def send(self, s, dat):
self.last_updated = s
if isinstance(dat, bytes):
self.data[s] = log.Event.from_bytes(dat)
else:
self.data[s] = dat.as_reader()
self.send_called.set()
wait_for_event(self.get_called)
self.get_called.clear()
def wait_for_msg(self):
wait_for_event(self.send_called)
self.send_called.clear()
dat = self.data[self.last_updated]
self.get_called.set()
return dat
def fingerprint(msgs, fsm, can_sock, fingerprint):
print("start fingerprinting")
fsm.wait_on_getitem = True
# populate fake socket with data for fingerprinting
canmsgs = [msg for msg in msgs if msg.which() == "can"]
wait_for_event(can_sock.recv_called)
can_sock.recv_called.clear()
can_sock.data = [msg.as_builder().to_bytes() for msg in canmsgs[:300]]
can_sock.recv_ready.set()
can_sock.wait = False
# we know fingerprinting is done when controlsd sets sm['lateralPlan'].sensorValid
wait_for_event(fsm.update_called)
fsm.update_called.clear()
fsm.wait_on_getitem = False
can_sock.wait = True
can_sock.data = []
fsm.update_ready.set()
print("finished fingerprinting")
def get_car_params(msgs, fsm, can_sock, fingerprint):
if fingerprint:
CarInterface, _, _ = interfaces[fingerprint]
CP = CarInterface.get_params(fingerprint)
else:
can = FakeSocket(wait=False)
sendcan = FakeSocket(wait=False)
canmsgs = [msg for msg in msgs if msg.which() == 'can']
for m in canmsgs[:300]:
can.send(m.as_builder().to_bytes())
_, CP = get_car(can, sendcan)
Params().put("CarParams", CP.to_bytes())
def controlsd_rcv_callback(msg, CP, cfg, fsm):
# no sendcan until controlsd is initialized
socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
if "sendcan" in socks and fsm.frame < 2000:
socks.remove("sendcan")
return socks, len(socks) > 0
def radar_rcv_callback(msg, CP, cfg, fsm):
if msg.which() != "can":
return [], False
elif CP.radarOffCan:
return ["radarState", "liveTracks"], True
radar_msgs = {"honda": [0x445], "toyota": [0x19f, 0x22f], "gm": [0x474],
"chrysler": [0x2d4]}.get(CP.carName, None)
if radar_msgs is None:
raise NotImplementedError
for m in msg.can:
if m.src == 1 and m.address in radar_msgs:
return ["radarState", "liveTracks"], True
return [], False
def calibration_rcv_callback(msg, CP, cfg, fsm):
# calibrationd publishes 1 calibrationData every 5 cameraOdometry packets.
# should_recv always true to increment frame
recv_socks = []
frame = fsm.frame + 1 # incrementing hasn't happened yet in SubMaster
if frame == 0 or (msg.which() == 'cameraOdometry' and (frame % 5) == 0):
recv_socks = ["liveCalibration"]
return recv_socks, fsm.frame == 0 or msg.which() == 'cameraOdometry'
def ublox_rcv_callback(msg):
msg_class, msg_id = msg.ubloxRaw[2:4]
if (msg_class, msg_id) in {(1, 7 * 16)}:
return ["gpsLocationExternal"]
elif (msg_class, msg_id) in {(2, 1 * 16 + 5), (10, 9)}:
return ["ubloxGnss"]
else:
return []
CONFIGS = [
ProcessConfig(
proc_name="controlsd",
pub_sub={
"can": ["controlsState", "carState", "carControl", "sendcan", "carEvents", "carParams"],
"deviceState": [], "pandaStates": [], "peripheralState": [], "liveCalibration": [], "driverMonitoringState": [], "longitudinalPlan": [], "lateralPlan": [], "liveLocationKalman": [], "liveParameters": [], "radarState": [],
"modelV2": [], "driverCameraState": [], "roadCameraState": [], "ubloxRaw": [], "managerState": [],
},
ignore=["logMonoTime", "valid", "controlsState.startMonoTime", "controlsState.cumLagMs"],
init_callback=fingerprint,
should_recv_callback=controlsd_rcv_callback,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="radard",
pub_sub={
"can": ["radarState", "liveTracks"],
"liveParameters": [], "carState": [], "modelV2": [],
},
ignore=["logMonoTime", "valid", "radarState.cumLagMs"],
init_callback=get_car_params,
should_recv_callback=radar_rcv_callback,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="plannerd",
pub_sub={
"modelV2": ["lateralPlan"], "radarState": ["longitudinalPlan"],
"carState": [], "controlsState": [],
},
ignore=["logMonoTime", "valid", "longitudinalPlan.processingDelay"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="calibrationd",
pub_sub={
"carState": ["liveCalibration"],
"cameraOdometry": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=calibration_rcv_callback,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="dmonitoringd",
pub_sub={
"driverState": ["driverMonitoringState"],
"liveCalibration": [], "carState": [], "modelV2": [], "controlsState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="locationd",
pub_sub={
"cameraOdometry": ["liveLocationKalman"],
"sensorEvents": [], "gpsLocationExternal": [], "liveCalibration": [], "carState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=False,
),
ProcessConfig(
proc_name="paramsd",
pub_sub={
"liveLocationKalman": ["liveParameters"],
"carState": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="ubloxd",
pub_sub={
"ubloxRaw": ["ubloxGnss", "gpsLocationExternal"],
},
ignore=["logMonoTime"],
init_callback=None,
should_recv_callback=ublox_rcv_callback,
tolerance=None,
fake_pubsubmaster=False,
),
]
def replay_process(cfg, lr, fingerprint=None):
if cfg.fake_pubsubmaster:
return python_replay_process(cfg, lr, fingerprint)
else:
return cpp_replay_process(cfg, lr, fingerprint)
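# A minimal sketch of driving one of the CONFIGS above. The LogReader import path and
# the rlog file name are assumptions (openpilot's tools/lib helper and a locally
# available log segment); a test harness normally drives this module instead.
def _example_replay_controlsd(rlog_path="rlog.bz2"):
    from tools.lib.logreader import LogReader  # assumed openpilot helper
    lr = list(LogReader(rlog_path))
    cfg = next(c for c in CONFIGS if c.proc_name == "controlsd")
    return replay_process(cfg, lr)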
def python_replay_process(cfg, lr, fingerprint=None):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub]
pub_sockets = [s for s in cfg.pub_sub.keys() if s != 'can']
fsm = FakeSubMaster(pub_sockets)
fpm = FakePubMaster(sub_sockets)
args = (fsm, fpm)
if 'can' in list(cfg.pub_sub.keys()):
can_sock = FakeSocket()
args = (fsm, fpm, can_sock)
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
params = Params()
params.clear_all()
params.put_bool("OpenpilotEnabledToggle", True)
params.put_bool("Passive", False)
params.put_bool("CommunityFeaturesToggle", True)
os.environ['NO_RADAR_SLEEP'] = "1"
os.environ["SIMULATION"] = "1"
# TODO: remove after getting new route for civic & accord
migration = {
"HONDA CIVIC 2016 TOURING": "HONDA CIVIC 2016",
"HONDA ACCORD 2018 SPORT 2T": "HONDA ACCORD 2018",
"HONDA ACCORD 2T 2018": "HONDA ACCORD 2018",
}
if fingerprint is not None:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = fingerprint
else:
os.environ['SKIP_FW_QUERY'] = ""
os.environ['FINGERPRINT'] = ""
for msg in lr:
if msg.which() == 'carParams':
car_fingerprint = migration.get(msg.carParams.carFingerprint, msg.carParams.carFingerprint)
if len(msg.carParams.carFw) and (car_fingerprint in FW_VERSIONS):
params.put("CarParamsCache", msg.carParams.as_builder().to_bytes())
else:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = car_fingerprint
assert(type(managed_processes[cfg.proc_name]) is PythonProcess)
managed_processes[cfg.proc_name].prepare()
mod = importlib.import_module(managed_processes[cfg.proc_name].module)
thread = threading.Thread(target=mod.main, args=args)
thread.daemon = True
thread.start()
if cfg.init_callback is not None:
if 'can' not in list(cfg.pub_sub.keys()):
can_sock = None
cfg.init_callback(all_msgs, fsm, can_sock, fingerprint)
CP = car.CarParams.from_bytes(params.get("CarParams", block=True))
# wait for started process to be ready
if 'can' in list(cfg.pub_sub.keys()):
can_sock.wait_for_recv()
else:
fsm.wait_for_update()
log_msgs, msg_queue = [], []
for msg in tqdm(pub_msgs, disable=CI):
if cfg.should_recv_callback is not None:
recv_socks, should_recv = cfg.should_recv_callback(msg, CP, cfg, fsm)
else:
recv_socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
should_recv = bool(len(recv_socks))
if msg.which() == 'can':
can_sock.send(msg.as_builder().to_bytes())
else:
msg_queue.append(msg.as_builder())
if should_recv:
fsm.update_msgs(0, msg_queue)
msg_queue = []
recv_cnt = len(recv_socks)
while recv_cnt > 0:
m = fpm.wait_for_msg().as_builder()
m.logMonoTime = msg.logMonoTime
m = m.as_reader()
log_msgs.append(m)
recv_cnt -= m.which() in recv_socks
return log_msgs
def cpp_replay_process(cfg, lr, fingerprint=None):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub] # We get responses here
pm = messaging.PubMaster(cfg.pub_sub.keys())
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
log_msgs = []
managed_processes[cfg.proc_name].prepare()
managed_processes[cfg.proc_name].start()
try:
with Timeout(TIMEOUT):
while not all(pm.all_readers_updated(s) for s in cfg.pub_sub.keys()):
time.sleep(0)
# Make sure all subscribers are connected
sockets = {s: messaging.sub_sock(s, timeout=2000) for s in sub_sockets}
for s in sub_sockets:
messaging.recv_one_or_none(sockets[s])
for i, msg in enumerate(tqdm(pub_msgs, disable=False)):
pm.send(msg.which(), msg.as_builder())
resp_sockets = cfg.pub_sub[msg.which()] if cfg.should_recv_callback is None else cfg.should_recv_callback(msg)
for s in resp_sockets:
response = messaging.recv_one(sockets[s])
if response is None:
print(f"Warning, no response received {i}")
else:
response = response.as_builder()
response.logMonoTime = msg.logMonoTime
response = response.as_reader()
log_msgs.append(response)
if not len(resp_sockets): # We only need to wait if we didn't already wait for a response
while not pm.all_readers_updated(msg.which()):
time.sleep(0)
finally:
managed_processes[cfg.proc_name].signal(signal.SIGKILL)
managed_processes[cfg.proc_name].stop()
return log_msgs
|
maintenance.py
|
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import copy
import inspect
import re
import threading
from futurist import periodics
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import provider_net as pnet
from neutron_lib import constants as n_const
from neutron_lib import context as n_context
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
from ovsdbapp.backend.ovs_idl import event as row_event
from neutron.common.ovn import constants as ovn_const
from neutron.common.ovn import utils
from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf
from neutron.db import ovn_hash_ring_db as hash_ring_db
from neutron.db import ovn_revision_numbers_db as revision_numbers_db
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync
CONF = cfg.CONF
LOG = log.getLogger(__name__)
INCONSISTENCY_TYPE_CREATE_UPDATE = 'create/update'
INCONSISTENCY_TYPE_DELETE = 'delete'
class MaintenanceThread(object):
def __init__(self):
self._callables = []
self._thread = None
self._worker = None
def add_periodics(self, obj):
for name, member in inspect.getmembers(obj):
if periodics.is_periodic(member):
LOG.debug('Periodic task found: %(owner)s.%(member)s',
{'owner': obj.__class__.__name__, 'member': name})
self._callables.append((member, (), {}))
def start(self):
if self._thread is None:
self._worker = periodics.PeriodicWorker(self._callables)
self._thread = threading.Thread(target=self._worker.start)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._worker.stop()
self._worker.wait()
self._thread.join()
self._worker = self._thread = None
def rerun_on_schema_updates(func):
"""Tasks decorated with this will rerun upon database version updates."""
func._rerun_on_schema_updates = True
return func
class OVNNBDBReconnectionEvent(row_event.RowEvent):
"""Event listening to reconnections from OVN Northbound DB."""
def __init__(self, driver, version):
self.driver = driver
self.version = version
table = 'Connection'
events = (self.ROW_CREATE,)
super(OVNNBDBReconnectionEvent, self).__init__(events, table, None)
self.event_name = self.__class__.__name__
def run(self, event, row, old):
curr_version = self.driver.get_ovn_nbdb_version()
if self.version != curr_version:
self.driver.nbdb_schema_updated_hook()
self.version = curr_version
class SchemaAwarePeriodicsBase(object):
def __init__(self, ovn_client):
self._nb_idl = ovn_client._nb_idl
self._set_schema_aware_periodics()
self._nb_idl.idl.notify_handler.watch_event(OVNNBDBReconnectionEvent(
self, self.get_ovn_nbdb_version()))
def get_ovn_nbdb_version(self):
return self._nb_idl.idl._db.version
def _set_schema_aware_periodics(self):
self._schema_aware_periodics = []
for name, member in inspect.getmembers(self):
if not inspect.ismethod(member):
continue
schema_upt = getattr(member, '_rerun_on_schema_updates', None)
if schema_upt and periodics.is_periodic(member):
LOG.debug('Schema aware periodic task found: '
'%(owner)s.%(member)s',
{'owner': self.__class__.__name__, 'member': name})
self._schema_aware_periodics.append(member)
@abc.abstractmethod
def nbdb_schema_updated_hook(self):
"""Hook invoked upon OVN NB schema is updated."""
class DBInconsistenciesPeriodics(SchemaAwarePeriodicsBase):
def __init__(self, ovn_client):
self._ovn_client = ovn_client
# FIXME(lucasagomes): We should not be accessing private
# attributes like that, perhaps we should extend the OVNClient
# class and create an interface for the locks ?
self._nb_idl = self._ovn_client._nb_idl
self._sb_idl = self._ovn_client._sb_idl
self._idl = self._nb_idl.idl
self._idl.set_lock('ovn_db_inconsistencies_periodics')
self._sync_timer = timeutils.StopWatch()
super(DBInconsistenciesPeriodics, self).__init__(ovn_client)
self._resources_func_map = {
ovn_const.TYPE_NETWORKS: {
'neutron_get': self._ovn_client._plugin.get_network,
'ovn_get': self._nb_idl.get_lswitch,
'ovn_create': self._ovn_client.create_network,
'ovn_update': self._ovn_client.update_network,
'ovn_delete': self._ovn_client.delete_network,
},
ovn_const.TYPE_PORTS: {
'neutron_get': self._ovn_client._plugin.get_port,
'ovn_get': self._nb_idl.get_lswitch_port,
'ovn_create': self._ovn_client.create_port,
'ovn_update': self._ovn_client.update_port,
'ovn_delete': self._ovn_client.delete_port,
},
ovn_const.TYPE_FLOATINGIPS: {
'neutron_get': self._ovn_client._l3_plugin.get_floatingip,
'ovn_get': self._nb_idl.get_floatingip_in_nat_or_lb,
'ovn_create': self._create_floatingip_and_pf,
'ovn_update': self._update_floatingip_and_pf,
'ovn_delete': self._delete_floatingip_and_pf,
},
ovn_const.TYPE_ROUTERS: {
'neutron_get': self._ovn_client._l3_plugin.get_router,
'ovn_get': self._nb_idl.get_lrouter,
'ovn_create': self._ovn_client.create_router,
'ovn_update': self._ovn_client.update_router,
'ovn_delete': self._ovn_client.delete_router,
},
ovn_const.TYPE_SECURITY_GROUPS: {
'neutron_get': self._ovn_client._plugin.get_security_group,
'ovn_get': self._nb_idl.get_port_group,
'ovn_create': self._ovn_client.create_security_group,
'ovn_delete': self._ovn_client.delete_security_group,
},
ovn_const.TYPE_SECURITY_GROUP_RULES: {
'neutron_get':
self._ovn_client._plugin.get_security_group_rule,
'ovn_get': self._nb_idl.get_acl_by_id,
'ovn_create': self._ovn_client.create_security_group_rule,
'ovn_delete': self._ovn_client.delete_security_group_rule,
},
ovn_const.TYPE_ROUTER_PORTS: {
'neutron_get':
self._ovn_client._plugin.get_port,
'ovn_get': self._nb_idl.get_lrouter_port,
'ovn_create': self._create_lrouter_port,
'ovn_update': self._ovn_client.update_router_port,
'ovn_delete': self._ovn_client.delete_router_port,
},
}
@property
def has_lock(self):
return not self._idl.is_lock_contended
def nbdb_schema_updated_hook(self):
if not self.has_lock:
return
for func in self._schema_aware_periodics:
            LOG.debug('OVN Northbound DB schema version was updated, '
                      'invoking "%s"', func.__name__)
try:
func()
except periodics.NeverAgain:
pass
except Exception:
LOG.exception(
'Unknown error while executing "%s"', func.__name__)
def _fix_create_update(self, context, row):
res_map = self._resources_func_map[row.resource_type]
try:
# Get the latest version of the resource in Neutron DB
n_obj = res_map['neutron_get'](context, row.resource_uuid)
except n_exc.NotFound:
LOG.warning('Skip fixing resource %(res_uuid)s (type: '
'%(res_type)s). Resource does not exist in Neutron '
'database anymore', {'res_uuid': row.resource_uuid,
'res_type': row.resource_type})
return
ovn_obj = res_map['ovn_get'](row.resource_uuid)
if not ovn_obj:
res_map['ovn_create'](context, n_obj)
else:
if row.resource_type == ovn_const.TYPE_SECURITY_GROUP_RULES:
LOG.error("SG rule %s found with a revision number while "
"this resource doesn't support updates",
row.resource_uuid)
elif row.resource_type == ovn_const.TYPE_SECURITY_GROUPS:
# In OVN, we don't care about updates to security groups,
# so just bump the revision number to whatever it's
# supposed to be.
revision_numbers_db.bump_revision(context, n_obj,
row.resource_type)
else:
ext_ids = getattr(ovn_obj, 'external_ids', {})
ovn_revision = int(ext_ids.get(
ovn_const.OVN_REV_NUM_EXT_ID_KEY, -1))
                # If the resource exists in the OVN DB but the revision
                # number differs from the Neutron DB, update it.
if ovn_revision != n_obj['revision_number']:
res_map['ovn_update'](context, n_obj)
else:
                    # If the resource exists and the revision number
                    # is equal in both databases, just bump the revision in
                    # the cache table.
revision_numbers_db.bump_revision(context, n_obj,
row.resource_type)
def _fix_delete(self, context, row):
res_map = self._resources_func_map[row.resource_type]
ovn_obj = res_map['ovn_get'](row.resource_uuid)
if not ovn_obj:
revision_numbers_db.delete_revision(
context, row.resource_uuid, row.resource_type)
else:
res_map['ovn_delete'](context, row.resource_uuid)
def _fix_create_update_subnet(self, context, row):
        # Get the latest version of the subnet in the Neutron DB
sn_db_obj = self._ovn_client._plugin.get_subnet(
context, row.resource_uuid)
n_db_obj = self._ovn_client._plugin.get_network(
context, sn_db_obj['network_id'])
if row.revision_number == ovn_const.INITIAL_REV_NUM:
self._ovn_client.create_subnet(context, sn_db_obj, n_db_obj)
else:
self._ovn_client.update_subnet(context, sn_db_obj, n_db_obj)
# The migration will run just once per neutron-server instance. If the lock
# is held by some other neutron-server instance in the cloud, we'll attempt
# to perform the migration every 10 seconds until completed.
# TODO(ihrachys): Remove the migration to stateless fips at some point.
@periodics.periodic(spacing=10, run_immediately=True)
@rerun_on_schema_updates
def migrate_to_stateless_fips(self):
"""Perform the migration from stateful to stateless Floating IPs. """
# Only the worker holding a valid lock within OVSDB will perform the
# migration.
if not self.has_lock:
return
admin_context = n_context.get_admin_context()
nb_sync = ovn_db_sync.OvnNbSynchronizer(
self._ovn_client._plugin, self._nb_idl, self._ovn_client._sb_idl,
None, None)
nb_sync.migrate_to_stateless_fips(admin_context)
raise periodics.NeverAgain()
# The migration will run just once per neutron-server instance. If the lock
# is held by some other neutron-server instance in the cloud, we'll attempt
# to perform the migration every 10 seconds until completed.
# TODO(jlibosva): Remove the migration to port groups at some point. It's
# been around since Queens release so it is good to drop this soon.
@periodics.periodic(spacing=10, run_immediately=True)
@rerun_on_schema_updates
def migrate_to_port_groups(self):
"""Perform the migration from Address Sets to Port Groups. """
# TODO(dalvarez): Remove this in U cycle when we're sure that all
# versions are running using Port Groups (and OVS >= 2.10).
# If Port Groups are not supported or we've already migrated, we don't
# need to attempt to migrate again.
if not self._nb_idl.get_address_sets():
raise periodics.NeverAgain()
# Only the worker holding a valid lock within OVSDB will perform the
# migration.
if not self.has_lock:
return
admin_context = n_context.get_admin_context()
nb_sync = ovn_db_sync.OvnNbSynchronizer(
self._ovn_client._plugin, self._nb_idl, self._ovn_client._sb_idl,
None, None)
nb_sync.migrate_to_port_groups(admin_context)
raise periodics.NeverAgain()
def _log_maintenance_inconsistencies(self, create_update_inconsistencies,
delete_inconsistencies):
if not CONF.debug:
return
def _log(inconsistencies, type_):
if not inconsistencies:
return
c = {}
for f in inconsistencies:
if f.resource_type not in c:
c[f.resource_type] = 1
else:
c[f.resource_type] += 1
fail_str = ', '.join('{}={}'.format(k, v) for k, v in c.items())
LOG.debug('Maintenance task: Number of inconsistencies '
'found at %(type_)s: %(fail_str)s',
{'type_': type_, 'fail_str': fail_str})
_log(create_update_inconsistencies, INCONSISTENCY_TYPE_CREATE_UPDATE)
_log(delete_inconsistencies, INCONSISTENCY_TYPE_DELETE)
@periodics.periodic(spacing=ovn_const.DB_CONSISTENCY_CHECK_INTERVAL,
run_immediately=True)
def check_for_inconsistencies(self):
# Only the worker holding a valid lock within OVSDB will run
# this periodic
if not self.has_lock:
return
admin_context = n_context.get_admin_context()
create_update_inconsistencies = (
revision_numbers_db.get_inconsistent_resources(admin_context))
delete_inconsistencies = (
revision_numbers_db.get_deleted_resources(admin_context))
if not any([create_update_inconsistencies, delete_inconsistencies]):
LOG.debug('Maintenance task: No inconsistencies found. Skipping')
return
LOG.debug('Maintenance task: Synchronizing Neutron '
'and OVN databases')
self._log_maintenance_inconsistencies(create_update_inconsistencies,
delete_inconsistencies)
self._sync_timer.restart()
dbg_log_msg = ('Maintenance task: Fixing resource %(res_uuid)s '
'(type: %(res_type)s) at %(type_)s')
# Fix the create/update resources inconsistencies
for row in create_update_inconsistencies:
LOG.debug(dbg_log_msg, {'res_uuid': row.resource_uuid,
'res_type': row.resource_type,
'type_': INCONSISTENCY_TYPE_CREATE_UPDATE})
try:
                # NOTE(lucasagomes): The way to fix subnets is a bit
                # different from other resources. A subnet in OVN language
                # is just a DHCP rule, but this rule only exists if the
                # subnet in Neutron has the "enable_dhcp" attribute set
                # to True. So, it's possible to have a consistent subnet
                # resource even when it does not exist in the OVN database.
if row.resource_type == ovn_const.TYPE_SUBNETS:
self._fix_create_update_subnet(admin_context, row)
else:
self._fix_create_update(admin_context, row)
except Exception:
LOG.exception('Maintenance task: Failed to fix resource '
'%(res_uuid)s (type: %(res_type)s)',
{'res_uuid': row.resource_uuid,
'res_type': row.resource_type})
# Fix the deleted resources inconsistencies
for row in delete_inconsistencies:
LOG.debug(dbg_log_msg, {'res_uuid': row.resource_uuid,
'res_type': row.resource_type,
'type_': INCONSISTENCY_TYPE_DELETE})
try:
if row.resource_type == ovn_const.TYPE_SUBNETS:
self._ovn_client.delete_subnet(admin_context,
row.resource_uuid)
elif row.resource_type == ovn_const.TYPE_PORTS:
self._ovn_client.delete_port(admin_context,
row.resource_uuid)
else:
self._fix_delete(admin_context, row)
except Exception:
LOG.exception('Maintenance task: Failed to fix deleted '
'resource %(res_uuid)s (type: %(res_type)s)',
{'res_uuid': row.resource_uuid,
'res_type': row.resource_type})
self._sync_timer.stop()
LOG.info('Maintenance task: Synchronization finished '
'(took %.2f seconds)', self._sync_timer.elapsed())
def _create_lrouter_port(self, context, port):
router_id = port['device_id']
iface_info = self._ovn_client._l3_plugin._add_neutron_router_interface(
context, router_id, {'port_id': port['id']}, may_exist=True)
self._ovn_client.create_router_port(context, router_id, iface_info)
def _check_subnet_global_dhcp_opts(self):
inconsistent_subnets = []
admin_context = n_context.get_admin_context()
subnet_filter = {'enable_dhcp': [True]}
neutron_subnets = self._ovn_client._plugin.get_subnets(
admin_context, subnet_filter)
global_v4_opts = ovn_conf.get_global_dhcpv4_opts()
global_v6_opts = ovn_conf.get_global_dhcpv6_opts()
LOG.debug('Checking %s subnets for global DHCP option consistency',
len(neutron_subnets))
for subnet in neutron_subnets:
ovn_dhcp_opts = self._nb_idl.get_subnet_dhcp_options(
subnet['id'])['subnet']
inconsistent_opts = []
if ovn_dhcp_opts:
if subnet['ip_version'] == n_const.IP_VERSION_4:
for opt, value in global_v4_opts.items():
if value != ovn_dhcp_opts['options'].get(opt, None):
inconsistent_opts.append(opt)
if subnet['ip_version'] == n_const.IP_VERSION_6:
for opt, value in global_v6_opts.items():
if value != ovn_dhcp_opts['options'].get(opt, None):
inconsistent_opts.append(opt)
if inconsistent_opts:
LOG.debug('Subnet %s has inconsistent DHCP opts: %s',
subnet['id'], inconsistent_opts)
inconsistent_subnets.append(subnet)
return inconsistent_subnets
def _create_floatingip_and_pf(self, context, floatingip):
self._ovn_client.create_floatingip(context, floatingip)
self._ovn_client._l3_plugin.port_forwarding.maintenance_create(
context, floatingip)
def _update_floatingip_and_pf(self, context, floatingip):
self._ovn_client.update_floatingip(context, floatingip)
self._ovn_client._l3_plugin.port_forwarding.maintenance_update(
context, floatingip)
def _delete_floatingip_and_pf(self, context, fip_id):
self._ovn_client._l3_plugin.port_forwarding.maintenance_delete(
context, fip_id)
self._ovn_client.delete_floatingip(context, fip_id)
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600,
run_immediately=True)
def check_global_dhcp_opts(self):
# This periodic task is included in DBInconsistenciesPeriodics since
# it uses the lock to ensure only one worker is executing
if not self.has_lock:
return
if (not ovn_conf.get_global_dhcpv4_opts() and
not ovn_conf.get_global_dhcpv6_opts()):
# No need to scan the subnets if the settings are unset.
raise periodics.NeverAgain()
LOG.debug('Maintenance task: Checking DHCP options on subnets')
self._sync_timer.restart()
fix_subnets = self._check_subnet_global_dhcp_opts()
if fix_subnets:
admin_context = n_context.get_admin_context()
LOG.debug('Triggering update for %s subnets', len(fix_subnets))
for subnet in fix_subnets:
neutron_net = self._ovn_client._plugin.get_network(
admin_context, subnet['network_id'])
try:
self._ovn_client.update_subnet(admin_context, subnet,
neutron_net)
except Exception:
LOG.exception('Failed to update subnet %s',
subnet['id'])
self._sync_timer.stop()
LOG.info('Maintenance task: DHCP options check finished '
'(took %.2f seconds)', self._sync_timer.elapsed())
raise periodics.NeverAgain()
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600, run_immediately=True)
def check_for_igmp_snoop_support(self):
if not self.has_lock:
return
with self._nb_idl.transaction(check_error=True) as txn:
value = ('true' if ovn_conf.is_igmp_snooping_enabled()
else 'false')
for ls in self._nb_idl.ls_list().execute(check_error=True):
if (ls.other_config.get(ovn_const.MCAST_SNOOP,
None) == value or not ls.name):
continue
txn.add(self._nb_idl.db_set(
'Logical_Switch', ls.name,
('other_config', {
ovn_const.MCAST_SNOOP: value,
ovn_const.MCAST_FLOOD_UNREGISTERED: 'false'})))
raise periodics.NeverAgain()
def _delete_default_ha_chassis_group(self, txn):
# TODO(lucasgomes): Remove the deletion of the
# HA_CHASSIS_GROUP_DEFAULT_NAME in the Y cycle. We no longer
# have a default HA Chassis Group.
cmd = [self._nb_idl.ha_chassis_group_del(
ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME, if_exists=True)]
self._ovn_client._transaction(cmd, txn=txn)
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600, run_immediately=True)
def check_for_ha_chassis_group(self):
# If external ports is not supported stop running
# this periodic task
if not self._ovn_client.is_external_ports_supported():
raise periodics.NeverAgain()
if not self.has_lock:
return
external_ports = self._nb_idl.db_find_rows(
'Logical_Switch_Port', ('type', '=', ovn_const.LSP_TYPE_EXTERNAL)
).execute(check_error=True)
context = n_context.get_admin_context()
with self._nb_idl.transaction(check_error=True) as txn:
for port in external_ports:
network_id = port.external_ids[
ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY].replace(
ovn_const.OVN_NAME_PREFIX, '')
ha_ch_grp = self._ovn_client.sync_ha_chassis_group(
context, network_id, txn)
try:
port_ha_ch_uuid = port.ha_chassis_group[0].uuid
except IndexError:
port_ha_ch_uuid = None
if port_ha_ch_uuid != ha_ch_grp:
txn.add(self._nb_idl.set_lswitch_port(
port.name, ha_chassis_group=ha_ch_grp))
self._delete_default_ha_chassis_group(txn)
raise periodics.NeverAgain()
# TODO(lucasagomes): Remove this in the Z cycle
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600, run_immediately=True)
def check_for_mcast_flood_reports(self):
if not self.has_lock:
return
cmds = []
for port in self._nb_idl.lsp_list().execute(check_error=True):
port_type = port.type.strip()
if port_type in ("vtep", ovn_const.LSP_TYPE_LOCALPORT, "router"):
continue
options = port.options
if port_type == ovn_const.LSP_TYPE_LOCALNET:
mcast_flood_value = options.get(
ovn_const.LSP_OPTIONS_MCAST_FLOOD_REPORTS)
if mcast_flood_value == 'false':
continue
options.update({ovn_const.LSP_OPTIONS_MCAST_FLOOD: 'false'})
elif ovn_const.LSP_OPTIONS_MCAST_FLOOD_REPORTS in options:
continue
options.update({ovn_const.LSP_OPTIONS_MCAST_FLOOD_REPORTS: 'true'})
cmds.append(self._nb_idl.lsp_set_options(port.name, **options))
if cmds:
with self._nb_idl.transaction(check_error=True) as txn:
for cmd in cmds:
txn.add(cmd)
raise periodics.NeverAgain()
# TODO(lucasagomes): Remove this in the Z cycle
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600, run_immediately=True)
def check_router_mac_binding_options(self):
if not self.has_lock:
return
cmds = []
for router in self._nb_idl.lr_list().execute(check_error=True):
if (router.options.get('always_learn_from_arp_request') and
router.options.get('dynamic_neigh_routers')):
continue
opts = copy.deepcopy(router.options)
opts.update({'always_learn_from_arp_request': 'false',
'dynamic_neigh_routers': 'true'})
cmds.append(self._nb_idl.update_lrouter(router.name, options=opts))
if cmds:
with self._nb_idl.transaction(check_error=True) as txn:
for cmd in cmds:
txn.add(cmd)
raise periodics.NeverAgain()
# TODO(ralonsoh): Remove this in the Z+2 cycle
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600, run_immediately=True)
def update_port_qos_with_external_ids_reference(self):
"""Update all OVN QoS registers with the port ID
This method will only update the OVN QoS registers related to port QoS,
not FIP QoS. FIP QoS registers already have the corresponding "external_ids" reference.
"""
if not self.has_lock:
return
regex = re.compile(
r'(inport|outport) == \"(?P<port_id>[a-z0-9\-]{36})\"')
cmds = []
for ls in self._nb_idl.ls_list().execute(check_error=True):
for qos in self._nb_idl.qos_list(ls.name).execute(
check_error=True):
if qos.external_ids:
continue
match = re.match(regex, qos.match)
if not match:
continue
port_id = match.group('port_id')
external_ids = {ovn_const.OVN_PORT_EXT_ID_KEY: port_id}
cmds.append(self._nb_idl.db_set(
'QoS', qos.uuid, ('external_ids', external_ids)))
if cmds:
with self._nb_idl.transaction(check_error=True) as txn:
for cmd in cmds:
txn.add(cmd)
raise periodics.NeverAgain()
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600, run_immediately=True)
def check_vlan_distributed_ports(self):
"""Check VLAN distributed ports
Check the "reside-on-redirect-chassis" option value on
distributed VLAN ports.
"""
if not self.has_lock:
return
context = n_context.get_admin_context()
cmds = []
# Get router ports belonging to VLAN networks
vlan_nets = self._ovn_client._plugin.get_networks(
context, {pnet.NETWORK_TYPE: [n_const.TYPE_VLAN]})
vlan_net_ids = [vn['id'] for vn in vlan_nets]
router_ports = self._ovn_client._plugin.get_ports(
context, {'network_id': vlan_net_ids,
'device_owner': n_const.ROUTER_PORT_OWNERS})
expected_value = ('false' if ovn_conf.is_ovn_distributed_floating_ip()
else 'true')
for rp in router_ports:
lrp_name = utils.ovn_lrouter_port_name(rp['id'])
lrp = self._nb_idl.get_lrouter_port(lrp_name)
if lrp.options.get(
ovn_const.LRP_OPTIONS_RESIDE_REDIR_CH) != expected_value:
opt = {ovn_const.LRP_OPTIONS_RESIDE_REDIR_CH: expected_value}
cmds.append(self._nb_idl.db_set(
'Logical_Router_Port', lrp_name, ('options', opt)))
if cmds:
with self._nb_idl.transaction(check_error=True) as txn:
for cmd in cmds:
txn.add(cmd)
raise periodics.NeverAgain()
# TODO(ralonsoh): Remove this in the Z+3 cycle. This method adds the
# "external_ids:OVN_GW_NETWORK_EXT_ID_KEY" to each router that has
# a gateway (that means, that has "external_ids:OVN_GW_PORT_EXT_ID_KEY").
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600, run_immediately=True)
def update_logical_router_with_gateway_network_id(self):
"""Update all OVN logical router registers with the GW network ID"""
if not self.has_lock:
return
cmds = []
context = n_context.get_admin_context()
for lr in self._nb_idl.lr_list().execute(check_error=True):
gw_port = lr.external_ids.get(ovn_const.OVN_GW_PORT_EXT_ID_KEY)
gw_net = lr.external_ids.get(ovn_const.OVN_GW_NETWORK_EXT_ID_KEY)
if not gw_port or (gw_port and gw_net):
# This router does not have a gateway network assigned yet or
# it has a gateway port and its corresponding network.
continue
port = self._ovn_client._plugin.get_port(context, gw_port)
external_ids = {
ovn_const.OVN_GW_NETWORK_EXT_ID_KEY: port['network_id']}
cmds.append(self._nb_idl.db_set(
'Logical_Router', lr.uuid, ('external_ids', external_ids)))
if cmds:
with self._nb_idl.transaction(check_error=True) as txn:
for cmd in cmds:
txn.add(cmd)
raise periodics.NeverAgain()
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600, run_immediately=True)
def check_baremetal_ports_dhcp_options(self):
"""Update baremetal ports DHCP options
Update baremetal ports DHCP options based on the
"disable_ovn_dhcp_for_baremetal_ports" configuration option.
"""
# If external ports are not supported, stop running
# this periodic task
if not self._ovn_client.is_external_ports_supported():
raise periodics.NeverAgain()
if not self.has_lock:
return
context = n_context.get_admin_context()
ports = self._ovn_client._plugin.get_ports(
context,
filters={portbindings.VNIC_TYPE: portbindings.VNIC_BAREMETAL})
if not ports:
raise periodics.NeverAgain()
with self._nb_idl.transaction(check_error=True) as txn:
for port in ports:
lsp = self._nb_idl.lsp_get(port['id']).execute(
check_error=True)
if not lsp:
continue
update_dhcp = False
if ovn_conf.is_ovn_dhcp_disabled_for_baremetal():
if lsp.dhcpv4_options or lsp.dhcpv6_options:
update_dhcp = True
else:
if not lsp.dhcpv4_options and not lsp.dhcpv6_options:
update_dhcp = True
if update_dhcp:
port_info = self._ovn_client._get_port_options(port)
dhcpv4_options, dhcpv6_options = (
self._ovn_client.update_port_dhcp_options(
port_info, txn))
txn.add(self._nb_idl.set_lswitch_port(
lport_name=port['id'],
dhcpv4_options=dhcpv4_options,
dhcpv6_options=dhcpv6_options,
if_exists=False))
raise periodics.NeverAgain()
class HashRingHealthCheckPeriodics(object):
def __init__(self, group):
self._group = group
self.ctx = n_context.get_admin_context()
@periodics.periodic(spacing=ovn_const.HASH_RING_TOUCH_INTERVAL)
def touch_hash_ring_nodes(self):
# NOTE(lucasagomes): Note that we do not rely on the OVSDB lock
# here because we want the maintenance tasks from each instance to
# execute this task.
hash_ring_db.touch_nodes_from_host(self.ctx, self._group)
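# ---------------------------------------------------------------------------
# Illustrative sketch (an editorial addition, not part of this module): the
# "run once per lock" pattern used by the maintenance tasks above. A task keeps
# its static spacing only until it raises periodics.NeverAgain(), which removes
# it from the schedule for as long as this worker holds the maintenance lock.
# The standalone function below is hypothetical and exists only to show the shape.
# ---------------------------------------------------------------------------
@periodics.periodic(spacing=600, run_immediately=True)
def _example_one_shot_task(has_lock=True):
    """Sketch only: perform a one-time fix, then unschedule this callback."""
    if not has_lock:
        return  # another worker owns the lock; try again at the next interval
    # ... perform the one-time consistency fix here ...
    raise periodics.NeverAgain()  # never reschedule once the fix has been applied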
|
utils.py
|
import asyncio
from asyncio import TimeoutError
import atexit
import click
from collections import deque, OrderedDict, UserDict
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
import functools
from hashlib import md5
import html
import inspect
import json
import logging
import multiprocessing
import os
import re
import shutil
import socket
from time import sleep
import importlib
from importlib.util import cache_from_source
import inspect
import sys
import tempfile
import threading
import warnings
import weakref
import pkgutil
import base64
import tblib.pickling_support
import xml.etree.ElementTree
try:
import resource
except ImportError:
resource = None
import dask
from dask import istask
# provide format_bytes here for backwards compatibility
from dask.utils import ( # noqa
format_bytes,
funcname,
format_time,
parse_bytes,
parse_timedelta,
)
import toolz
import tornado
from tornado import gen
from tornado.ioloop import IOLoop
try:
from tornado.ioloop import PollIOLoop
except ImportError:
PollIOLoop = None # dropped in tornado 6.0
from .compatibility import PYPY, WINDOWS, get_running_loop
from .metrics import time
try:
from dask.context import thread_state
except ImportError:
thread_state = threading.local()
logger = _logger = logging.getLogger(__name__)
no_default = "__no_default__"
def _initialize_mp_context():
if WINDOWS or PYPY:
return multiprocessing
else:
method = dask.config.get("distributed.worker.multiprocessing-method")
ctx = multiprocessing.get_context(method)
# Makes the test suite much faster
preload = ["distributed"]
if "pkg_resources" in sys.modules:
preload.append("pkg_resources")
from .versions import required_packages, optional_packages
for pkg, _ in required_packages + optional_packages:
try:
importlib.import_module(pkg)
except ImportError:
pass
else:
preload.append(pkg)
ctx.set_forkserver_preload(preload)
return ctx
mp_context = _initialize_mp_context()
def has_arg(func, argname):
"""
Whether the function takes an argument with the given name.
"""
while True:
try:
if argname in inspect.getfullargspec(func).args:
return True
except TypeError:
break
try:
# For Tornado coroutines and other decorated functions
func = func.__wrapped__
except AttributeError:
break
return False
def get_fileno_limit():
"""
Get the maximum number of open files per process.
"""
if resource is not None:
return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
else:
# Default ceiling for Windows when using the CRT, though it
# is settable using _setmaxstdio().
return 512
@toolz.memoize
def _get_ip(host, port, family):
# By using a UDP socket, we don't actually try to connect but
# simply select the local address through which *host* is reachable.
sock = socket.socket(family, socket.SOCK_DGRAM)
try:
sock.connect((host, port))
ip = sock.getsockname()[0]
return ip
except EnvironmentError as e:
warnings.warn(
"Couldn't detect a suitable IP address for "
"reaching %r, defaulting to hostname: %s" % (host, e),
RuntimeWarning,
)
addr_info = socket.getaddrinfo(
socket.gethostname(), port, family, socket.SOCK_DGRAM, socket.IPPROTO_UDP
)[0]
return addr_info[4][0]
finally:
sock.close()
def get_ip(host="8.8.8.8", port=80):
"""
Get the local IP address through which the *host* is reachable.
*host* defaults to a well-known Internet host (one of Google's public
DNS servers).
"""
return _get_ip(host, port, family=socket.AF_INET)
def get_ipv6(host="2001:4860:4860::8888", port=80):
"""
The same as get_ip(), but for IPv6.
"""
return _get_ip(host, port, family=socket.AF_INET6)
def get_ip_interface(ifname):
"""
Get the local IPv4 address of a network interface.
ValueError is raised if the interface doesn't exist or if it does not
have an IPv4 address associated with it.
"""
import psutil
net_if_addrs = psutil.net_if_addrs()
if ifname not in net_if_addrs:
allowed_ifnames = list(net_if_addrs.keys())
raise ValueError(
"{!r} is not a valid network interface. "
"Valid network interfaces are: {}".format(ifname, allowed_ifnames)
)
for info in net_if_addrs[ifname]:
if info.family == socket.AF_INET:
return info.address
raise ValueError("interface %r doesn't have an IPv4 address" % (ifname,))
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
@gen.coroutine
def ignore_exceptions(coroutines, *exceptions):
""" Process list of coroutines, ignoring certain exceptions
>>> coroutines = [cor(...) for ...] # doctest: +SKIP
>>> x = yield ignore_exceptions(coroutines, TypeError) # doctest: +SKIP
"""
wait_iterator = gen.WaitIterator(*coroutines)
results = []
while not wait_iterator.done():
with ignoring(*exceptions):
result = yield wait_iterator.next()
results.append(result)
raise gen.Return(results)
async def All(args, quiet_exceptions=()):
""" Wait on many tasks at the same time
Err once any of the tasks err.
See https://github.com/tornadoweb/tornado/issues/1546
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
""" Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
return results
async def Any(args, quiet_exceptions=()):
""" Wait on many tasks at the same time and return when any is finished
Err once any of the tasks err.
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
""" Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
break
return results
def sync(loop, func, *args, callback_timeout=None, **kwargs):
"""
Run coroutine in loop running in separate thread.
"""
# Tornado's PollIOLoop doesn't raise when used after being closed; do it ourselves
if PollIOLoop and (
(isinstance(loop, PollIOLoop) and getattr(loop, "_closing", False))
or (hasattr(loop, "asyncio_loop") and loop.asyncio_loop._closed)
):
raise RuntimeError("IOLoop is closed")
try:
if loop.asyncio_loop.is_closed(): # tornado 6
raise RuntimeError("IOLoop is closed")
except AttributeError:
pass
e = threading.Event()
main_tid = threading.get_ident()
result = [None]
error = [False]
@gen.coroutine
def f():
try:
if main_tid == threading.get_ident():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
thread_state.asynchronous = True
future = func(*args, **kwargs)
if callback_timeout is not None:
future = asyncio.wait_for(future, callback_timeout)
result[0] = yield future
except Exception as exc:
error[0] = sys.exc_info()
finally:
thread_state.asynchronous = False
e.set()
loop.add_callback(f)
if callback_timeout is not None:
if not e.wait(callback_timeout):
raise TimeoutError("timed out after %s s." % (callback_timeout,))
else:
while not e.is_set():
e.wait(10)
if error[0]:
typ, exc, tb = error[0]
raise exc.with_traceback(tb)
else:
return result[0]
class LoopRunner:
"""
A helper to start and stop an IO loop in a controlled way.
Several loop runners can safely be associated with the same IO loop.
Parameters
----------
loop: IOLoop (optional)
If given, this loop will be re-used, otherwise an appropriate one
will be looked up or created.
asynchronous: boolean (optional, default False)
If false (the default), the loop is meant to run in a separate
thread and will be started if necessary.
If true, the loop is meant to run in the thread this
object is instantiated from, and will not be started automatically.
"""
# All loops currently associated to loop runners
_all_loops = weakref.WeakKeyDictionary()
_lock = threading.Lock()
def __init__(self, loop=None, asynchronous=False):
current = IOLoop.current()
if loop is None:
if asynchronous:
self._loop = current
else:
# We're expecting the loop to run in another thread,
# avoid re-using this thread's assigned loop
self._loop = IOLoop()
self._should_close_loop = True
else:
self._loop = loop
self._should_close_loop = False
self._asynchronous = asynchronous
self._loop_thread = None
self._started = False
with self._lock:
self._all_loops.setdefault(self._loop, (0, None))
def start(self):
"""
Start the IO loop if required. The loop is run in a dedicated
thread.
If the loop is already running, this method does nothing.
"""
with self._lock:
self._start_unlocked()
def _start_unlocked(self):
assert not self._started
count, real_runner = self._all_loops[self._loop]
if self._asynchronous or real_runner is not None or count > 0:
self._all_loops[self._loop] = count + 1, real_runner
self._started = True
return
assert self._loop_thread is None
assert count == 0
loop_evt = threading.Event()
done_evt = threading.Event()
in_thread = [None]
start_exc = [None]
def loop_cb():
in_thread[0] = threading.current_thread()
loop_evt.set()
def run_loop(loop=self._loop):
loop.add_callback(loop_cb)
try:
loop.start()
except Exception as e:
start_exc[0] = e
finally:
done_evt.set()
thread = threading.Thread(target=run_loop, name="IO loop")
thread.daemon = True
thread.start()
loop_evt.wait(timeout=10)
self._started = True
actual_thread = in_thread[0]
if actual_thread is not thread:
# Loop already running in other thread (user-launched)
done_evt.wait(5)
if not isinstance(start_exc[0], RuntimeError):
if not isinstance(
start_exc[0], Exception
): # track down infrequent error
raise TypeError("not an exception", start_exc[0])
raise start_exc[0]
self._all_loops[self._loop] = count + 1, None
else:
assert start_exc[0] is None, start_exc
self._loop_thread = thread
self._all_loops[self._loop] = count + 1, self
def stop(self, timeout=10):
"""
Stop and close the loop if it was created by us.
Otherwise, just mark this object "stopped".
"""
with self._lock:
self._stop_unlocked(timeout)
def _stop_unlocked(self, timeout):
if not self._started:
return
self._started = False
count, real_runner = self._all_loops[self._loop]
if count > 1:
self._all_loops[self._loop] = count - 1, real_runner
else:
assert count == 1
del self._all_loops[self._loop]
if real_runner is not None:
real_runner._real_stop(timeout)
def _real_stop(self, timeout):
assert self._loop_thread is not None
if self._loop_thread is not None:
try:
self._loop.add_callback(self._loop.stop)
self._loop_thread.join(timeout=timeout)
with ignoring(KeyError): # IOLoop can be missing
self._loop.close()
finally:
self._loop_thread = None
def is_started(self):
"""
Return True between start() and stop() calls, False otherwise.
"""
return self._started
def run_sync(self, func, *args, **kwargs):
"""
Convenience helper: start the loop if needed,
run sync(func, *args, **kwargs), then stop the loop again.
"""
if self._started:
return sync(self.loop, func, *args, **kwargs)
else:
self.start()
try:
return sync(self.loop, func, *args, **kwargs)
finally:
self.stop()
@property
def loop(self):
return self._loop
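# Illustrative usage sketch (an editorial addition, not part of distributed):
# run a coroutine on a background IO loop from synchronous code by combining
# LoopRunner with run_sync(), which wraps sync() above.
def _loop_runner_example():
    async def double(x):
        return 2 * x
    runner = LoopRunner()  # the IO loop will run in a dedicated daemon thread
    return runner.run_sync(double, 21)  # starts the loop, runs sync(), stops it again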
@contextmanager
def set_thread_state(**kwargs):
old = {}
for k in kwargs:
try:
old[k] = getattr(thread_state, k)
except AttributeError:
pass
for k, v in kwargs.items():
setattr(thread_state, k, v)
try:
yield
finally:
for k in kwargs:
try:
v = old[k]
except KeyError:
delattr(thread_state, k)
else:
setattr(thread_state, k, v)
@contextmanager
def tmp_text(filename, text):
fn = os.path.join(tempfile.gettempdir(), filename)
with open(fn, "w") as f:
f.write(text)
try:
yield fn
finally:
if os.path.exists(fn):
os.remove(fn)
def clear_queue(q):
while not q.empty():
q.get_nowait()
def is_kernel():
""" Determine if we're running within an IPython kernel
>>> is_kernel()
False
"""
# http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session
if "IPython" not in sys.modules: # IPython hasn't been imported
return False
from IPython import get_ipython
# check for `kernel` attribute on the IPython instance
return getattr(get_ipython(), "kernel", None) is not None
hex_pattern = re.compile("[a-f]+")
@functools.lru_cache(100000)
def key_split(s):
"""
>>> key_split('x')
'x'
>>> key_split('x-1')
'x'
>>> key_split('x-1-2-3')
'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
'x'
>>> key_split("('x', 1)")
'x'
>>> key_split('hello-world-1')
'hello-world'
>>> key_split(b'hello-world-1')
'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split("-")
if not words[0][0].isalpha():
result = words[0].split(",")[0].strip("'(\"")
else:
result = words[0]
for word in words[1:]:
if word.isalpha() and not (
len(word) == 8 and hex_pattern.match(word) is not None
):
result += "-" + word
else:
break
if len(result) == 32 and re.match(r"[a-f0-9]{32}", result):
return "data"
else:
if result[0] == "<":
result = result.strip("<>").split()[0].split(".")[-1]
return result
except Exception:
return "Other"
def key_split_group(x):
"""A more fine-grained version of key_split
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split_group('x')
>>> key_split_group('x-1')
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str:
if x[0] == "(":
return x.split(",", 1)[0].strip("()\"'")
elif len(x) == 32 and re.match(r"[a-f0-9]{32}", x):
return "data"
elif x[0] == "<":
return x.strip("<>").split()[0].split(".")[-1]
else:
return ""
elif typ is bytes:
return key_split_group(x.decode())
else:
return ""
@contextmanager
def log_errors(pdb=False):
from .comm import CommClosedError
try:
yield
except (CommClosedError, gen.Return):
raise
except Exception as e:
try:
logger.exception(e)
except TypeError: # logger becomes None during process cleanup
pass
if pdb:
import pdb
pdb.set_trace()
raise
def silence_logging(level, root="distributed"):
"""
Change all StreamHandlers for the given logger to the given level
"""
if isinstance(level, str):
level = getattr(logging, level.upper())
old = None
logger = logging.getLogger(root)
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
old = handler.level
handler.setLevel(level)
return old
@toolz.memoize
def ensure_ip(hostname):
""" Ensure that address is an IP address
Examples
--------
>>> ensure_ip('localhost')
'127.0.0.1'
>>> ensure_ip('123.123.123.123') # pass through IP addresses
'123.123.123.123'
"""
# Prefer IPv4 over IPv6, for compatibility
families = [socket.AF_INET, socket.AF_INET6]
for fam in families:
try:
results = socket.getaddrinfo(
hostname, 1234, fam, socket.SOCK_STREAM # dummy port number
)
except socket.gaierror as e:
exc = e
else:
return results[0][4][0]
raise exc
tblib.pickling_support.install()
def get_traceback():
exc_type, exc_value, exc_traceback = sys.exc_info()
bad = [
os.path.join("distributed", "worker"),
os.path.join("distributed", "scheduler"),
os.path.join("tornado", "gen.py"),
os.path.join("concurrent", "futures"),
]
while exc_traceback and any(
b in exc_traceback.tb_frame.f_code.co_filename for b in bad
):
exc_traceback = exc_traceback.tb_next
return exc_traceback
def truncate_exception(e, n=10000):
""" Truncate exception to be about a certain length """
if len(str(e)) > n:
try:
return type(e)("Long error message", str(e)[:n])
except Exception:
return Exception("Long error message", type(e), str(e)[:n])
else:
return e
def tokey(o):
""" Convert an object to a string.
Examples
--------
>>> tokey(b'x')
b'x'
>>> tokey('x')
'x'
>>> tokey(1)
'1'
"""
typ = type(o)
if typ is str or typ is bytes:
return o
else:
return str(o)
def validate_key(k):
"""Validate a key as received on a stream.
"""
typ = type(k)
if typ is not str and typ is not bytes:
raise TypeError("Unexpected key type %s (value: %r)" % (typ, k))
def _maybe_complex(task):
""" Possibly contains a nested task """
return (
istask(task)
or type(task) is list
and any(map(_maybe_complex, task))
or type(task) is dict
and any(map(_maybe_complex, task.values()))
)
def convert(task, dsk, extra_values):
if type(task) is list:
return [convert(v, dsk, extra_values) for v in task]
if type(task) is dict:
return {k: convert(v, dsk, extra_values) for k, v in task.items()}
if istask(task):
return (task[0],) + tuple(convert(x, dsk, extra_values) for x in task[1:])
try:
if task in dsk or task in extra_values:
return tokey(task)
except TypeError:
pass
return task
def str_graph(dsk, extra_values=()):
return {tokey(k): convert(v, dsk, extra_values) for k, v in dsk.items()}
def seek_delimiter(file, delimiter, blocksize):
""" Seek current file to next byte after a delimiter bytestring
This seeks the file to the next byte following the delimiter. It does
not return anything. Use ``file.tell()`` to see location afterwards.
Parameters
----------
file: a file
delimiter: bytes
a delimiter like ``b'\n'`` or message sentinel
blocksize: int
Number of bytes to read from the file at once.
"""
if file.tell() == 0:
return
last = b""
while True:
current = file.read(blocksize)
if not current:
return
full = last + current
try:
i = full.index(delimiter)
file.seek(file.tell() - (len(full) - i) + len(delimiter))
return
except ValueError:
pass
last = full[-len(delimiter) :]
def read_block(f, offset, length, delimiter=None):
""" Read a block of bytes from a file
Parameters
----------
f: file
File-like object supporting seek, read, tell, etc..
offset: int
Byte offset to start read
length: int
Number of bytes to read
delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
If using the ``delimiter=`` keyword argument we ensure that the read
starts and stops at delimiter boundaries that follow the locations
``offset`` and ``offset + length``. If ``offset`` is zero then we
start at zero. The bytestring returned WILL include the
terminating delimiter string.
Examples
--------
>>> from io import BytesIO # doctest: +SKIP
>>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP
>>> read_block(f, 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200\\n'
>>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP
b'Bob, 200\\nCharlie, 300'
"""
if delimiter:
f.seek(offset)
seek_delimiter(f, delimiter, 2 ** 16)
start = f.tell()
length -= start - offset
f.seek(start + length)
seek_delimiter(f, delimiter, 2 ** 16)
end = f.tell()
offset = start
length = end - start
f.seek(offset)
bytes = f.read(length)
return bytes
@contextmanager
def tmpfile(extension=""):
extension = "." + extension.lstrip(".")
handle, filename = tempfile.mkstemp(extension)
os.close(handle)
os.remove(filename)
yield filename
if os.path.exists(filename):
try:
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
except OSError: # sometimes we can't remove a generated temp file
pass
def ensure_bytes(s):
"""Attempt to turn `s` into bytes.
Parameters
----------
s : Any
The object to be converted. The following types are handled correctly:
* str
* bytes
* objects implementing the buffer protocol (memoryview, ndarray, etc.)
Returns
-------
b : bytes
Raises
------
TypeError
When `s` cannot be converted
Examples
--------
>>> ensure_bytes('123')
b'123'
>>> ensure_bytes(b'123')
b'123'
"""
if hasattr(s, "encode"):
return s.encode()
else:
try:
return bytes(s)
except Exception as e:
raise TypeError(
"Object %s is neither a bytes object nor has an encode method" % s
) from e
def divide_n_among_bins(n, bins):
"""
>>> divide_n_among_bins(12, [1, 1])
[6, 6]
>>> divide_n_among_bins(12, [1, 2])
[4, 8]
>>> divide_n_among_bins(12, [1, 2, 1])
[3, 6, 3]
>>> divide_n_among_bins(11, [1, 2, 1])
[2, 6, 3]
>>> divide_n_among_bins(11, [.1, .2, .1])
[2, 6, 3]
"""
total = sum(bins)
acc = 0.0
out = []
for b in bins:
now = n / total * b + acc
now, acc = divmod(now, 1)
out.append(int(now))
return out
def mean(seq):
seq = list(seq)
return sum(seq) / len(seq)
if hasattr(sys, "is_finalizing"):
def shutting_down(is_finalizing=sys.is_finalizing):
return is_finalizing()
else:
_shutting_down = [False]
def _at_shutdown(l=_shutting_down):
l[0] = True
def shutting_down(l=_shutting_down):
return l[0]
atexit.register(_at_shutdown)
shutting_down.__doc__ = """
Whether the interpreter is currently shutting down.
For use in finalizers, __del__ methods, and similar; it is advised
to early bind this function rather than look it up when calling it,
since at shutdown module globals may be cleared.
"""
def open_port(host=""):
""" Return a probably-open port
There is a chance that this port will be taken by the operating system soon
after returning from this function.
"""
# http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def import_file(path):
""" Loads modules for a file (.py, .zip, .egg) """
directory, filename = os.path.split(path)
name, ext = os.path.splitext(filename)
names_to_import = []
tmp_python_path = None
if ext in (".py",): # , '.pyc'):
if directory not in sys.path:
tmp_python_path = directory
names_to_import.append(name)
if ext == ".py": # Ensure that no pyc file will be reused
cache_file = cache_from_source(path)
with ignoring(OSError):
os.remove(cache_file)
if ext in (".egg", ".zip", ".pyz"):
if path not in sys.path:
sys.path.insert(0, path)
if sys.version_info >= (3, 6):
names = (mod_info.name for mod_info in pkgutil.iter_modules([path]))
else:
names = (mod_info[1] for mod_info in pkgutil.iter_modules([path]))
names_to_import.extend(names)
loaded = []
if not names_to_import:
logger.warning("Found nothing to import from %s", filename)
else:
importlib.invalidate_caches()
if tmp_python_path is not None:
sys.path.insert(0, tmp_python_path)
try:
for name in names_to_import:
logger.info("Reload module %s from %s file", name, ext)
loaded.append(importlib.reload(importlib.import_module(name)))
finally:
if tmp_python_path is not None:
sys.path.remove(tmp_python_path)
return loaded
class itemgetter:
"""A picklable itemgetter.
Examples
--------
>>> data = [0, 1, 2]
>>> get_1 = itemgetter(1)
>>> get_1(data)
1
"""
__slots__ = ("index",)
def __init__(self, index):
self.index = index
def __call__(self, x):
return x[self.index]
def __reduce__(self):
return (itemgetter, (self.index,))
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns))
row_template = ("|" + (" %%-%ds |" * len(columns))) % widths
header = row_template % tuple(columns)
bar = "+%s+" % "+".join("-" * (w + 2) for w in widths)
data = "\n".join(row_template % r for r in rows)
return "\n".join([bar, header, bar, data, bar])
def nbytes(frame, _bytes_like=(bytes, bytearray)):
""" Number of bytes of a frame or memoryview """
if isinstance(frame, _bytes_like):
return len(frame)
else:
try:
return frame.nbytes
except AttributeError:
return len(frame)
def PeriodicCallback(callback, callback_time, io_loop=None):
"""
Wrapper around tornado.IOLoop.PeriodicCallback, for compatibility
with removal of the `io_loop` parameter in Tornado 5.0.
"""
if tornado.version_info >= (5,):
return tornado.ioloop.PeriodicCallback(callback, callback_time)
else:
return tornado.ioloop.PeriodicCallback(callback, callback_time, io_loop)
@contextmanager
def time_warn(duration, text):
start = time()
yield
end = time()
if end - start > duration:
print("TIME WARNING", text, end - start)
def json_load_robust(fn, load=json.load):
""" Reads a JSON file from disk that may be being written as we read """
while not os.path.exists(fn):
sleep(0.01)
for i in range(10):
try:
with open(fn) as f:
cfg = load(f)
if cfg:
return cfg
except (ValueError, KeyError): # race with writing process
pass
sleep(0.1)
class DequeHandler(logging.Handler):
""" A logging.Handler that records records into a deque """
_instances = weakref.WeakSet()
def __init__(self, *args, n=10000, **kwargs):
self.deque = deque(maxlen=n)
super(DequeHandler, self).__init__(*args, **kwargs)
self._instances.add(self)
def emit(self, record):
self.deque.append(record)
def clear(self):
"""
Clear internal storage.
"""
self.deque.clear()
@classmethod
def clear_all_instances(cls):
"""
Clear the internal storage of all live DequeHandlers.
"""
for inst in list(cls._instances):
inst.clear()
def reset_logger_locks():
""" Python 2's logger's locks don't survive a fork event
https://github.com/dask/distributed/issues/1491
"""
for name in logging.Logger.manager.loggerDict.keys():
for handler in logging.getLogger(name).handlers:
handler.createLock()
if tornado.version_info[0] >= 5:
is_server_extension = False
if "notebook" in sys.modules:
import traitlets
from notebook.notebookapp import NotebookApp
is_server_extension = traitlets.config.Application.initialized() and isinstance(
traitlets.config.Application.instance(), NotebookApp
)
if not is_server_extension:
is_kernel_and_no_running_loop = False
if is_kernel():
try:
get_running_loop()
except RuntimeError:
is_kernel_and_no_running_loop = True
if not is_kernel_and_no_running_loop:
import tornado.platform.asyncio
asyncio.set_event_loop_policy(
tornado.platform.asyncio.AnyThreadEventLoopPolicy()
)
@functools.lru_cache(1000)
def has_keyword(func, keyword):
return keyword in inspect.signature(func).parameters
@functools.lru_cache(1000)
def command_has_keyword(cmd, k):
if cmd is not None:
if isinstance(cmd, str):
try:
from importlib import import_module
cmd = import_module(cmd)
except ImportError:
raise ImportError("Module for command %s is not available" % cmd)
if isinstance(getattr(cmd, "main"), click.core.Command):
cmd = cmd.main
if isinstance(cmd, click.core.Command):
cmd_params = set(
[
p.human_readable_name
for p in cmd.params
if isinstance(p, click.core.Option)
]
)
return k in cmd_params
return False
# from bokeh.palettes import viridis
# palette = viridis(18)
palette = [
"#440154",
"#471669",
"#472A79",
"#433C84",
"#3C4D8A",
"#355D8C",
"#2E6C8E",
"#287A8E",
"#23898D",
"#1E978A",
"#20A585",
"#2EB27C",
"#45BF6F",
"#64CB5D",
"#88D547",
"#AFDC2E",
"#D7E219",
"#FDE724",
]
@toolz.memoize
def color_of(x, palette=palette):
h = md5(str(x).encode())
n = int(h.hexdigest()[:8], 16)
return palette[n % len(palette)]
def iscoroutinefunction(f):
if gen.is_coroutine_function(f):
return True
if sys.version_info >= (3, 5) and inspect.iscoroutinefunction(f):
return True
return False
@contextmanager
def warn_on_duration(duration, msg):
start = time()
yield
stop = time()
if stop - start > parse_timedelta(duration):
warnings.warn(msg, stacklevel=2)
def typename(typ):
""" Return name of type
Examples
--------
>>> from distributed import Scheduler
>>> typename(Scheduler)
'distributed.scheduler.Scheduler'
"""
try:
return typ.__module__ + "." + typ.__name__
except AttributeError:
return str(typ)
def format_dashboard_link(host, port):
template = dask.config.get("distributed.dashboard.link")
if dask.config.get("distributed.scheduler.dashboard.tls.cert"):
scheme = "https"
else:
scheme = "http"
return template.format(
**toolz.merge(os.environ, dict(scheme=scheme, host=host, port=port))
)
def is_coroutine_function(f):
return asyncio.iscoroutinefunction(f) or gen.is_coroutine_function(f)
class Log(str):
""" A container for logs """
def _repr_html_(self):
return "<pre><code>\n{log}\n</code></pre>".format(
log=html.escape(self.rstrip())
)
class Logs(dict):
""" A container for multiple logs """
def _repr_html_(self):
summaries = [
"<details>\n"
"<summary style='display:list-item'>{title}</summary>\n"
"{log}\n"
"</details>".format(title=title, log=log._repr_html_())
for title, log in sorted(self.items())
]
return "\n".join(summaries)
def cli_keywords(d: dict, cls=None, cmd=None):
""" Convert a kwargs dictionary into a list of CLI keywords
Parameters
----------
d: dict
The keywords to convert
cls: callable
The callable that consumes these terms to check them for validity
cmd: string or object
A string with the name of a module, or the module containing a
click-generated command with a "main" function, or the function itself.
It may be used to parse a module's custom arguments (i.e., arguments that
are not part of Worker class), such as nprocs from dask-worker CLI or
enable_nvlink from dask-cuda-worker CLI.
Examples
--------
>>> cli_keywords({"x": 123, "save_file": "foo.txt"})
['--x', '123', '--save-file', 'foo.txt']
>>> from dask.distributed import Worker
>>> cli_keywords({"x": 123}, Worker)
Traceback (most recent call last):
...
ValueError: Class distributed.worker.Worker does not support keyword x
"""
if cls or cmd:
for k in d:
if not has_keyword(cls, k) and not command_has_keyword(cmd, k):
if cls and cmd:
raise ValueError(
    "Neither class %s nor module %s supports keyword %s"
    % (typename(cls), typename(cmd), k)
)
elif cls:
raise ValueError(
"Class %s does not support keyword %s" % (typename(cls), k)
)
else:
raise ValueError(
"Module %s does not support keyword %s" % (typename(cmd), k)
)
def convert_value(v):
out = str(v)
if " " in out and "'" not in out and '"' not in out:
out = '"' + out + '"'
return out
return sum(
[["--" + k.replace("_", "-"), convert_value(v)] for k, v in d.items()], []
)
def is_valid_xml(text):
return xml.etree.ElementTree.fromstring(text) is not None
try:
_offload_executor = ThreadPoolExecutor(
max_workers=1, thread_name_prefix="Dask-Offload"
)
except TypeError:
_offload_executor = ThreadPoolExecutor(max_workers=1)
weakref.finalize(_offload_executor, _offload_executor.shutdown)
def import_term(name: str):
""" Return the fully qualified term
Examples
--------
>>> import_term("math.sin")
<function math.sin(x, /)>
"""
try:
module_name, attr_name = name.rsplit(".", 1)
except ValueError:
return importlib.import_module(name)
module = importlib.import_module(module_name)
return getattr(module, attr_name)
async def offload(fn, *args, **kwargs):
loop = asyncio.get_event_loop()
return await loop.run_in_executor(_offload_executor, lambda: fn(*args, **kwargs))
def serialize_for_cli(data):
    """ Serialize data into a string that can be passed through the CLI
Parameters
----------
data: json-serializable object
The data to serialize
Returns
-------
serialized_data: str
The serialized data as a string
"""
return base64.urlsafe_b64encode(json.dumps(data).encode()).decode()
def deserialize_for_cli(data):
""" De-serialize data into the original object
Parameters
----------
data: str
String serialized by serialize_for_cli()
Returns
-------
deserialized_data: obj
The de-serialized data
"""
return json.loads(base64.urlsafe_b64decode(data.encode()).decode())
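# Round-trip sketch (an editorial addition, not part of distributed):
# serialize_for_cli() yields a URL-safe base64 string that can be passed on a
# command line, and deserialize_for_cli() restores the JSON-serializable object.
def _cli_serialization_example():
    payload = {"scheduler": "tcp://127.0.0.1:8786", "retries": 3}
    encoded = serialize_for_cli(payload)  # no spaces or quoting issues on the CLI
    assert deserialize_for_cli(encoded) == payload
    return encoded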
class EmptyContext:
def __enter__(self):
pass
def __exit__(self, *args):
pass
async def __aenter__(self):
pass
async def __aexit__(self, *args):
pass
empty_context = EmptyContext()
class LRU(UserDict):
""" Limited size mapping, evicting the least recently looked-up key when full
"""
def __init__(self, maxsize):
super().__init__()
self.data = OrderedDict()
self.maxsize = maxsize
def __getitem__(self, key):
value = super().__getitem__(key)
self.data.move_to_end(key)
return value
def __setitem__(self, key, value):
if len(self) >= self.maxsize:
self.data.popitem(last=False)
super().__setitem__(key, value)
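# Small usage sketch (an editorial addition, not part of distributed): the LRU
# mapping evicts the least recently looked-up key once maxsize is exceeded.
def _lru_example():
    lru = LRU(maxsize=2)
    lru["a"] = 1
    lru["b"] = 2
    _ = lru["a"]  # touching "a" makes "b" the least recently used key
    lru["c"] = 3  # inserting a third key evicts "b"
    assert "b" not in lru and "a" in lru and "c" in lru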
|
test_response_cache.py
|
from ray.util.client.common import (_id_is_newer, ResponseCache,
OrderedResponseCache, INT32_MAX)
import threading
import time
import pytest
def test_id_is_newer():
"""
Sanity checks the logic for ID is newer. In general, we would expect
that higher IDs are newer than lower IDs, for example 25 can be assumed
to be newer than 24.
Since IDs roll over at INT32_MAX (~2**31), we should check for weird
behavior there. In particular, we would expect an ID like `11` to be
newer than the ID `2**31` since it's likely that the counter rolled
over.
"""
# Common cases -- higher IDs normally considered newer
assert _id_is_newer(30, 29)
assert _id_is_newer(12345, 12344)
assert not _id_is_newer(12344, 12345)
assert not _id_is_newer(5678, 5678)
# Check behavior near max int boundary
assert _id_is_newer(INT32_MAX, INT32_MAX - 1)
assert _id_is_newer(INT32_MAX - 1, INT32_MAX - 2)
# Low IDs are assumed newer than higher ones if it looks like rollover has
# occurred
assert _id_is_newer(0, INT32_MAX - 4)
assert _id_is_newer(1001, INT32_MAX - 123)
assert not _id_is_newer(INT32_MAX, 123)
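# A minimal sketch of a rollover-tolerant comparison (an editorial addition, not
# Ray's implementation): if two IDs are further apart than almost the whole int32
# range, assume the counter wrapped and treat the numerically smaller ID as newer.
# The `window` tolerance below is a hypothetical choice for illustration only.
def _id_is_newer_sketch(id1, id2, window=2 ** 20):
    if abs(id1 - id2) > INT32_MAX - window:
        return id1 < id2  # rollover assumed: the small ID came after the wrap
    return id1 > id2  # common case: higher IDs are newer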
def test_response_cache_complete_response():
"""
Test basic check/update logic of cache, and that nothing blocks
"""
cache = ResponseCache()
cache.check_cache(123, 15) # shouldn't block
cache.update_cache(123, 15, "abcdef")
assert cache.check_cache(123, 15) == "abcdef"
def test_ordered_response_cache_complete_response():
"""
Test basic check/update logic of ordered cache, and that nothing blocks
"""
cache = OrderedResponseCache()
cache.check_cache(15) # shouldn't block
cache.update_cache(15, "vwxyz")
assert cache.check_cache(15) == "vwxyz"
def test_response_cache_incomplete_response():
"""
Tests the case where a cache entry is populated after a long time. Any new
threads attempting to access that entry should sleep until the response
is ready.
"""
cache = ResponseCache()
def populate_cache():
time.sleep(2)
cache.update_cache(123, 15, "abcdef")
cache.check_cache(123, 15) # shouldn't block
t = threading.Thread(target=populate_cache, args=())
t.start()
# Should block until other thread populates cache
assert cache.check_cache(123, 15) == "abcdef"
t.join()
def test_ordered_response_cache_incomplete_response():
"""
Tests the case where an ordered cache entry is populated after a long time. Any
new threads attempting to access that entry should sleep until the response
is ready.
"""
cache = OrderedResponseCache()
def populate_cache():
time.sleep(2)
cache.update_cache(15, "vwxyz")
cache.check_cache(15) # shouldn't block
t = threading.Thread(target=populate_cache, args=())
t.start()
# Should block until other thread populates cache
assert cache.check_cache(15) == "vwxyz"
t.join()
def test_ordered_response_cache_cleanup():
"""
Tests that the cleanup method of ordered cache works as expected, in
particular that all entries <= the passed ID are cleared from the cache.
"""
cache = OrderedResponseCache()
for i in range(1, 21):
assert cache.check_cache(i) is None
cache.update_cache(i, str(i))
assert len(cache.cache) == 20
for i in range(1, 21):
assert cache.check_cache(i) == str(i)
# Expected: clean up all entries up to and including entry 10
cache.cleanup(10)
assert len(cache.cache) == 10
with pytest.raises(RuntimeError):
# Attempting to access value that has already been cleaned up
cache.check_cache(10)
for i in range(21, 31):
# Check that more entries can be inserted
assert cache.check_cache(i) is None
cache.update_cache(i, str(i))
# Cleanup everything
cache.cleanup(30)
assert len(cache.cache) == 0
with pytest.raises(RuntimeError):
cache.check_cache(30)
# Cleanup requests received out of order are tolerated
cache.cleanup(27)
cache.cleanup(23)
def test_response_cache_update_while_waiting():
"""
Tests that an error is thrown when a cache entry is updated with the
response for a different request than what was originally being
checked for.
"""
# Error when awaiting cache to update, but entry is cleaned up
cache = ResponseCache()
assert cache.check_cache(16, 123) is None
def cleanup_cache():
time.sleep(2)
cache.check_cache(16, 124)
cache.update_cache(16, 124, "asdf")
t = threading.Thread(target=cleanup_cache, args=())
t.start()
with pytest.raises(RuntimeError):
cache.check_cache(16, 123)
t.join()
def test_ordered_response_cache_cleanup_while_waiting():
"""
Tests that an error is thrown when an ordered cache entry that is being
waited on is cleaned up before the corresponding response arrives.
"""
# Error when awaiting cache to update, but entry is cleaned up
cache = OrderedResponseCache()
assert cache.check_cache(123) is None
def cleanup_cache():
time.sleep(2)
cache.cleanup(123)
t = threading.Thread(target=cleanup_cache, args=())
t.start()
with pytest.raises(RuntimeError):
cache.check_cache(123)
t.join()
def test_response_cache_cleanup():
"""
Checks that the response cache replaces old entries for a given thread
with new entries as they come in, instead of creating new entries
(possibly wasting memory on unneeded entries)
"""
# Check that the response cache cleans up previous entries for a given
# thread properly.
cache = ResponseCache()
cache.check_cache(16, 123)
cache.update_cache(16, 123, "Some response")
assert len(cache.cache) == 1
cache.check_cache(16, 124)
cache.update_cache(16, 124, "Second response")
assert len(cache.cache) == 1 # Should reuse entry for thread 16
assert cache.check_cache(16, 124) == "Second response"
def test_response_cache_invalidate():
"""
Check that ordered response cache invalidate works as expected
"""
cache = OrderedResponseCache()
e = RuntimeError("SomeError")
# No pending entries, cache should be valid
assert not cache.invalidate(e)
# No entry for 123 yet
assert cache.check_cache(123) is None
# this should invalidate the entry for 123
assert cache.invalidate(e)
assert cache.check_cache(123) == e
assert cache.invalidate(e)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
|
stats.py
|
import json
import platform
import subprocess
import threading
import time
from typing import Dict, List, Optional, Union
import psutil
import wandb
from wandb import util
from wandb.vendor.pynvml import pynvml
from . import tpu
from ..interface.interface_queue import InterfaceQueue
from ..lib import telemetry
GPUHandle = object
SamplerDict = Dict[str, List[float]]
StatsDict = Dict[str, Union[float, Dict[str, float]]]
# TODO: hard coded max watts as 16.5, found this number in the SMC list.
# Eventually we can have the apple_gpu_stats binary query for this.
M1_MAX_POWER_WATTS = 16.5
def gpu_in_use_by_this_process(gpu_handle: GPUHandle) -> bool:
if not psutil:
return False
# NOTE: this optimizes for the case where wandb was initialized from
# inside the user script (i.e. `wandb.init()`). If we ran using
# `wandb run` on the command line, the shell will be detected as the
# parent, possibly resulting in sibling processes being incorrectly
# identified as part of this process -- still better than not
# detecting in-use gpus at all.
base_process = psutil.Process().parent() or psutil.Process()
our_processes = base_process.children(recursive=True)
our_processes.append(base_process)
our_pids = {process.pid for process in our_processes}
compute_pids = {
process.pid
for process in pynvml.nvmlDeviceGetComputeRunningProcesses(gpu_handle)
}
graphics_pids = {
process.pid
for process in pynvml.nvmlDeviceGetGraphicsRunningProcesses(gpu_handle)
}
pids_using_device = compute_pids | graphics_pids
return len(pids_using_device & our_pids) > 0
class SystemStats:
_pid: int
_interface: InterfaceQueue
sampler: SamplerDict
samples: int
_thread: Optional[threading.Thread]
gpu_count: int
def __init__(self, pid: int, interface: InterfaceQueue) -> None:
try:
pynvml.nvmlInit()
self.gpu_count = pynvml.nvmlDeviceGetCount()
except pynvml.NVMLError:
self.gpu_count = 0
# self.run = run
self._pid = pid
self._interface = interface
self.sampler = {}
self.samples = 0
self._shutdown = False
self._telem = telemetry.TelemetryRecord()
if psutil:
net = psutil.net_io_counters()
self.network_init = {"sent": net.bytes_sent, "recv": net.bytes_recv}
else:
wandb.termlog(
"psutil not installed, only GPU stats will be reported. Install with pip install psutil"
)
self._thread = None
self._tpu_profiler = None
if tpu.is_tpu_available():
try:
self._tpu_profiler = tpu.get_profiler()
except Exception as e:
wandb.termlog("Error initializing TPUProfiler: " + str(e))
def start(self) -> None:
if self._thread is None:
self._shutdown = False
self._thread = threading.Thread(target=self._thread_body)
self._thread.name = "StatsThr"
self._thread.daemon = True
if not self._thread.is_alive():
self._thread.start()
if self._tpu_profiler:
self._tpu_profiler.start()
@property
def proc(self) -> psutil.Process:
return psutil.Process(pid=self._pid)
@property
def sample_rate_seconds(self) -> float:
"""Sample system stats every this many seconds, defaults to 2, min is 0.5"""
return 2
# return max(0.5, self._api.dynamic_settings["system_sample_seconds"])
@property
def samples_to_average(self) -> int:
"""The number of samples to average before pushing, defaults to 15 valid range (2:30)"""
return 15
# return min(30, max(2, self._api.dynamic_settings["system_samples"]))
def _thread_body(self) -> None:
while True:
stats = self.stats()
for stat, value in stats.items():
if isinstance(value, (int, float)):
self.sampler[stat] = self.sampler.get(stat, [])
self.sampler[stat].append(value)
self.samples += 1
if self._shutdown or self.samples >= self.samples_to_average:
self.flush()
if self._shutdown:
break
seconds = 0.0
while seconds < self.sample_rate_seconds:
time.sleep(0.1)
seconds += 0.1
if self._shutdown:
self.flush()
return
def shutdown(self) -> None:
self._shutdown = True
try:
if self._thread is not None:
self._thread.join()
finally:
self._thread = None
if self._tpu_profiler:
self._tpu_profiler.stop()
def flush(self) -> None:
stats = self.stats()
for stat, value in stats.items():
# TODO: a bit hacky, we assume all numbers should be averaged. If you want
# max for a stat, you must put it in a sub key, like ["network"]["sent"]
if isinstance(value, (float, int)):
# samples = list(self.sampler.get(stat, [stats[stat]]))
samples = list(self.sampler.get(stat, [value]))
stats[stat] = round(sum(samples) / len(samples), 2)
# self.run.events.track("system", stats, _wandb=True)
if self._interface:
self._interface.publish_stats(stats)
self.samples = 0
self.sampler = {}
def stats(self) -> StatsDict:
stats: StatsDict = {}
for i in range(0, self.gpu_count):
handle = pynvml.nvmlDeviceGetHandleByIndex(i)
try:
utilz = pynvml.nvmlDeviceGetUtilizationRates(handle)
memory = pynvml.nvmlDeviceGetMemoryInfo(handle)
temp = pynvml.nvmlDeviceGetTemperature(
handle, pynvml.NVML_TEMPERATURE_GPU
)
in_use_by_us = gpu_in_use_by_this_process(handle)
stats["gpu.{}.{}".format(i, "gpu")] = utilz.gpu
stats["gpu.{}.{}".format(i, "memory")] = utilz.memory
stats["gpu.{}.{}".format(i, "memoryAllocated")] = (
memory.used / float(memory.total)
) * 100
stats["gpu.{}.{}".format(i, "temp")] = temp
if in_use_by_us:
stats["gpu.process.{}.{}".format(i, "gpu")] = utilz.gpu
stats["gpu.process.{}.{}".format(i, "memory")] = utilz.memory
stats["gpu.process.{}.{}".format(i, "memoryAllocated")] = (
memory.used / float(memory.total)
) * 100
stats["gpu.process.{}.{}".format(i, "temp")] = temp
# Some GPUs don't provide information about power usage
try:
power_watts = pynvml.nvmlDeviceGetPowerUsage(handle) / 1000.0
power_capacity_watts = (
pynvml.nvmlDeviceGetEnforcedPowerLimit(handle) / 1000.0
)
power_usage = (power_watts / power_capacity_watts) * 100
stats["gpu.{}.{}".format(i, "powerWatts")] = power_watts
stats["gpu.{}.{}".format(i, "powerPercent")] = power_usage
if in_use_by_us:
stats["gpu.process.{}.{}".format(i, "powerWatts")] = power_watts
stats[
"gpu.process.{}.{}".format(i, "powerPercent")
] = power_usage
except pynvml.NVMLError:
pass
except pynvml.NVMLError:
pass
# On Apple M1 systems, gather GPU stats via the apple_gpu_stats binary
if (
platform.system() == "Darwin"
and platform.processor() == "arm"
and self.gpu_count == 0
):
try:
out = subprocess.check_output([util.apple_gpu_stats_binary(), "--json"])
m1_stats = json.loads(out.split(b"\n")[0])
stats["gpu.0.gpu"] = m1_stats["utilization"]
stats["gpu.0.memoryAllocated"] = m1_stats["mem_used"]
stats["gpu.0.temp"] = m1_stats["temperature"]
stats["gpu.0.powerWatts"] = m1_stats["power"]
stats["gpu.0.powerPercent"] = (
m1_stats["power"] / M1_MAX_POWER_WATTS
) * 100
# TODO: this stat could be useful eventually, it was consistently
# 0 in my experimentation and requires a frontend change
# so leaving it out for now.
# stats["gpu.0.cpuWaitMs"] = m1_stats["cpu_wait_ms"]
if self._interface and not self._telem.env.m1_gpu:
self._telem.env.m1_gpu = True
self._interface._publish_telemetry(self._telem)
except (OSError, ValueError, TypeError, subprocess.CalledProcessError) as e:
wandb.termwarn(f"GPU stats error {e}")
if psutil:
net = psutil.net_io_counters()
sysmem = psutil.virtual_memory()
stats["cpu"] = psutil.cpu_percent()
stats["memory"] = sysmem.percent
stats["network"] = {
"sent": net.bytes_sent - self.network_init["sent"],
"recv": net.bytes_recv - self.network_init["recv"],
}
# TODO: maybe show other partitions, will likely need user to configure
stats["disk"] = psutil.disk_usage("/").percent
stats["proc.memory.availableMB"] = sysmem.available / 1048576.0
try:
stats["proc.memory.rssMB"] = self.proc.memory_info().rss / 1048576.0
stats["proc.memory.percent"] = self.proc.memory_percent()
stats["proc.cpu.threads"] = self.proc.num_threads()
except psutil.NoSuchProcess:
pass
if self._tpu_profiler:
tpu_utilization = self._tpu_profiler.get_tpu_utilization()
if tpu_utilization is not None:
stats["tpu"] = tpu_utilization
return stats
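def _averaging_example():
    """Sketch only (an editorial addition, not part of wandb): how flush() averages
    top-level numeric stats. Nested dicts such as "network" are passed through
    untouched; only scalar values collected by the sampler are averaged and rounded.
    """
    sampler: SamplerDict = {"cpu": [10.0, 20.0, 30.0]}
    stats: StatsDict = {"cpu": 30.0, "network": {"sent": 123.0, "recv": 456.0}}
    for stat, value in stats.items():
        if isinstance(value, (float, int)):
            samples = list(sampler.get(stat, [value]))
            stats[stat] = round(sum(samples) / len(samples), 2)
    return stats  # {"cpu": 20.0, "network": {"sent": 123.0, "recv": 456.0}}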
|
lifo_queues.py
|
import threading
import queue
import random
import time
def my_subscriber(queue_p):
while not queue_p.empty():
item = queue_p.get()
if item is None:
break
print("{} removed {} from the queue".format(threading.current_thread(), item))
queue_p.task_done()
myQueue = queue.LifoQueue()
for i in range(10):
myQueue.put(i)
print("Queue Populated")
threads = []
for i in range(2):
thread = threading.Thread(target=my_subscriber, args=(myQueue,))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
print("Queue is empty")
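# Quick illustrative check (an editorial addition, not part of the original demo):
# LifoQueue is last-in first-out, so the most recently added item comes back first.
check_queue = queue.LifoQueue()
check_queue.put("first")
check_queue.put("second")
assert check_queue.get() == "second"
print("LIFO order confirmed")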
|
async_plc.py
|
#!/usr/bin/env python
# SCADA Simulator
#
# Copyright 2018 Carnegie Mellon University. All Rights Reserved.
#
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
#
# Released under a MIT (SEI)-style license, please see license.txt or contact permission@sei.cmu.edu for full terms.
#
# [DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited distribution. Please see Copyright notice for non-US Government use and distribution.
# This Software includes and/or makes use of the following Third-Party Software subject to its own license:
# 1. Packery (https://packery.metafizzy.co/license.html) Copyright 2018 metafizzy.
# 2. Bootstrap (https://getbootstrap.com/docs/4.0/about/license/) Copyright 2011-2018 Twitter, Inc. and Bootstrap Authors.
# 3. JIT/Spacetree (https://philogb.github.io/jit/demos.html) Copyright 2013 Sencha Labs.
# 4. html5shiv (https://github.com/aFarkas/html5shiv/blob/master/MIT%20and%20GPL2%20licenses.md) Copyright 2014 Alexander Farkas.
# 5. jquery (https://jquery.org/license/) Copyright 2018 jquery foundation.
# 6. CanvasJS (https://canvasjs.com/license/) Copyright 2018 fenopix.
# 7. Respond.js (https://github.com/scottjehl/Respond/blob/master/LICENSE-MIT) Copyright 2012 Scott Jehl.
# 8. Datatables (https://datatables.net/license/) Copyright 2007 SpryMedia.
# 9. jquery-bridget (https://github.com/desandro/jquery-bridget) Copyright 2018 David DeSandro.
# 10. Draggabilly (https://draggabilly.desandro.com/) Copyright 2018 David DeSandro.
# 11. Business Casual Bootstrap Theme (https://startbootstrap.com/template-overviews/business-casual/) Copyright 2013 Blackrock Digital LLC.
# 12. Glyphicons Fonts (https://www.glyphicons.com/license/) Copyright 2010 - 2018 GLYPHICONS.
# 13. Bootstrap Toggle (http://www.bootstraptoggle.com/) Copyright 2011-2014 Min Hur, The New York Times.
# DM18-1351
#
'''
Asynchronous PyModbus Server with Client Functionality
Used for SCADASim 2.0
'''
# --------------------------------------------------------------------------- #
# import the modbus libraries we need
# --------------------------------------------------------------------------- #
from pymodbus.server.asynchronous import StartSerialServer
from pymodbus.server.asynchronous import StartTcpServer
from pymodbus.server.asynchronous import StartUdpServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSequentialDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.transaction import ModbusRtuFramer, ModbusAsciiFramer, ModbusBinaryFramer
# --------------------------------------------------------------------------- #
# import the other libraries we need
# --------------------------------------------------------------------------- #
from datastore import *
from helper import *
from time import *
from threading import Thread
import logging, yaml
import sys, os, argparse
'''
@brief reads from backup, initializes the datastore, starts the backup thread and the register behavior threads, then starts the server
'''
def run_updating_server(config_list, backup_filename, log):
# ----------------------------------------------------------------------- #
# initialize your data store
# ----------------------------------------------------------------------- #
# Run datastore_backup_on_start to use the most recent values of the datablocks, as the layout in the master config will only reflect initial values
# If this is the first time this is used, the backup file will match up with what is laid out in the master config (due to master.py)
datastore_config = datastore_backup_on_start(backup_filename)
if datastore_config == -1:
print(backup_filename)
print("Issue with backup file - either not created or empty. Exiting program.")
sys.exit()
store = ModbusSlaveContext(
di=ModbusSequentialDataBlock(datastore_config['di']['start_addr'], datastore_config['di']['values']),
co=ModbusSequentialDataBlock(datastore_config['co']['start_addr'], datastore_config['co']['values']),
hr=ModbusSequentialDataBlock(datastore_config['hr']['start_addr'], datastore_config['hr']['values']),
ir=ModbusSequentialDataBlock(datastore_config['ir']['start_addr'], datastore_config['ir']['values']))
    # We could have multiple slaves, each with its own addressing; since each async_plc.py instance handles a single PLC device, one slave context is sufficient
context = ModbusServerContext(slaves=store, single=True)
# setup a thread with target as datastore_backup_to_yaml to start here, before other threads
# this will continuously read from the context to write to a backup yaml file
backup_thread = Thread(target=datastore_backup_to_yaml, args=(context, backup_filename))
backup_thread.daemon = True
backup_thread.start()
# start register behaviors. Updating writer is started off, which will spawn a thread for every holding register based on the config
thread = Thread(target=updating_writer, args=(context, config_list, time, log, backup_filename))
thread.daemon = True
thread.start()
    # Starting the server
    server_config = config_list['SERVER']
    framer = configure_server_framer(server_config)
    # Device identification object, referenced by the UDP and RTU-framed TCP server calls below
    identity = ModbusDeviceIdentification()
if server_config['type'] == 'serial':
StartSerialServer(context, port=server_config['port'], framer=framer)
elif server_config['type'] == 'udp':
StartUdpServer(context, identity=identity, address=(server_config['address'], int(server_config['port'])))
elif server_config['type'] == 'tcp':
if server_config['framer'] == 'RTU':
StartTcpServer(context, identity=identity, address=(server_config['address'], int(server_config['port'])), framer=framer)
else:
StartTcpServer(context, address=(server_config['address'], int(server_config['port'])))
'''
@brief parse args, handle master config, setup logging, then call run_updating_server
'''
def main():
# --- BEGIN argparse handling ---
parser = argparse.ArgumentParser(description = "Main program for PLC device based off PyModbus")
parser.add_argument("--n", "--num_of_PLC", help = "The number of the PLC device")
parser.add_argument("--c", "--config_filename", help = "Name of the master config file")
args = parser.parse_args()
if args.n is None or args.c is None:
print("Need to run async_plc.py with --n and --c arguments. Run 'python async_plc.py --h' for help")
return
print( args )
num_of_PLC = args.n
master_config_filename = args.c
backup_filename = '/home/hp/Desktop/SCADASim/backups/backup_' + args.n + '.yaml'
# --- END argparse handling ---
stream = open(master_config_filename, 'r')
config_list = yaml.safe_load(stream)
stream.close()
# Only get the current PLC's configuration dictionary
config_list = config_list["PLC " + num_of_PLC]
# --- BEGIN LOGGING SETUP ---
FORMAT = config_list['LOGGING']['format']
# Add logic based on whether a file is used or stdout
# AND whether a format string is used or not
if config_list['LOGGING']['file'] == 'STDOUT':
if FORMAT == 'NONE':
logging.basicConfig()
else:
logging.basicConfig(format=FORMAT)
else:
if FORMAT == 'NONE':
logging.basicConfig(filename=config_list['LOGGING']['file'])
else:
logging.basicConfig(format=FORMAT, filename=config_list['LOGGING']['file'])
log = logging.getLogger()
configure_logging_level(config_list['LOGGING']['logging_level'], log)
# --- END LOGGING SETUP ---
run_updating_server(config_list, backup_filename, log)
if __name__ == "__main__":
main()
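# Illustrative invocation (a sketch; the PLC number and config filename below are assumptions):
#   python async_plc.py --n 1 --c master_config.yaml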
|
misc_utils.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Miscellaneous utility functions
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
import operator
from urllib.parse import quote, urlencode
from resources.lib.globals import G
def find(value_to_find, attribute, search_space):
"""Find a video with matching id in a dict or list"""
for video in search_space:
if video[attribute] == value_to_find:
return video
raise KeyError(f'Metadata for {value_to_find} does not exist')
def find_episode_metadata(episode_videoid, metadata):
"""Find metadata for a specific episode within a show metadata dict"""
season = find(int(episode_videoid.seasonid), 'id', metadata['seasons'])
episode = find(int(episode_videoid.episodeid), 'id', season.get('episodes', {}))
return episode, season
def get_class_methods(class_item=None):
"""
    Returns the class methods of a given class object
:param class_item: Class item to introspect
:type class_item: object
:returns: list -- Class methods
"""
from types import FunctionType
_type = FunctionType
return [x for x, y in class_item.__dict__.items()
if isinstance(y, _type)]
def build_url(pathitems=None, videoid=None, params=None, mode=None):
"""Build a plugin URL from pathitems and query parameters. Add videoid to the path if it's present."""
if not (pathitems or videoid):
raise ValueError('Either pathitems or videoid must be set.')
path = f'{G.BASE_URL}/{_encode_path(mode, pathitems, videoid)}/{_encode_params(params)}'
return path
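# Illustrative usage (a sketch; G.BASE_URL is the plugin's base URL and the values below are assumptions):
#   build_url(pathitems=['directory'], params={'genre_id': 83}) -> '<G.BASE_URL>/directory/?genre_id=83'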
def _expand_mode(mode):
return [mode] if mode else []
def _expand_videoid(videoid):
return videoid.to_path() if videoid else []
def _encode_path(mode, pathitems, videoid):
return quote(
'/'.join(_expand_mode(mode) +
(pathitems or []) +
_expand_videoid(videoid)).encode('utf-8'))
def _encode_params(params):
return f'?{urlencode(params)}' if params else ''
def is_numeric(string):
"""Return true if string represents an integer, else false"""
try:
int(string)
except ValueError:
return False
return True
def strp(value, form):
"""
Helper function to safely create datetime objects from strings
:return: datetime - parsed datetime object
"""
# pylint: disable=broad-except
from datetime import datetime
def_value = datetime.utcfromtimestamp(0)
try:
return datetime.strptime(value, form)
except TypeError:
# Python bug https://bugs.python.org/issue27400
try:
from time import strptime
return datetime(*(strptime(value, form)[0:6]))
except ValueError:
return def_value
except Exception:
return def_value
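# Illustrative (assumed format string): strp('2020-01-31', '%Y-%m-%d') returns datetime(2020, 1, 31, 0, 0)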
def strf_timestamp(timestamp, form):
"""
Helper function to safely create string date time from a timestamp value
:return: string - date time in the specified form
"""
from datetime import datetime
try:
return datetime.utcfromtimestamp(timestamp).strftime(form)
except Exception: # pylint: disable=broad-except
return ''
# def compress_data(data):
# """GZIP and b64 encode data"""
# out = StringIO()
# with gzip.GzipFile(fileobj=out, mode='w') as outh:
# outh.write(data)
# return base64.standard_b64encode(out.getvalue())
def merge_dicts(dict_to_merge, merged_dict):
"""Recursively merge the contents of dict_to_merge into merged_dict.
Values that are already present in merged_dict will be overwritten if they are also present in dict_to_merge"""
for key, value in dict_to_merge.items():
if isinstance(merged_dict.get(key), dict):
merge_dicts(value, merged_dict[key])
else:
merged_dict[key] = value
return merged_dict
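# Illustrative (assumed inputs): merge_dicts({'a': {'b': 1}}, {'a': {'c': 2}}) returns {'a': {'c': 2, 'b': 1}}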
def compare_dict_keys(dict_a, dict_b, compare_keys):
"""Compare two dictionaries with the specified keys"""
return all(dict_a[k] == dict_b[k] for k in dict_a if k in compare_keys)
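# Illustrative (assumed inputs): compare_dict_keys({'a': 1, 'b': 2}, {'a': 1, 'b': 3}, ['a']) returns True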
def chunked_list(seq, chunk_len):
for start in range(0, len(seq), chunk_len):
yield seq[start:start + chunk_len]
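# Illustrative (assumed inputs): list(chunked_list([1, 2, 3, 4, 5], 2)) returns [[1, 2], [3, 4], [5]]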
def any_value_except(mapping, excluded_keys):
"""Return a random value from a dict that is not associated with excluded_key.
Raises StopIteration if there are no other keys than excluded_key"""
return next(mapping[key] for key in mapping if key not in excluded_keys)
def enclose_quotes(content):
return f'"{content}"'
def make_list(arg):
"""Return a list with arg as its member or arg if arg is already a list. Returns an empty list if arg is None"""
return (arg
if isinstance(arg, list)
else ([arg]
if arg is not None
else []))
def convert_seconds_to_hms_str(time):
h = int(time // 3600)
time %= 3600
m = int(time // 60)
s = int(time % 60)
return '{:02d}:{:02d}:{:02d}'.format(h, m, s)
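# Illustrative: convert_seconds_to_hms_str(3661) returns '01:01:01'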
def remove_html_tags(raw_html):
import re
pattern = re.compile('<.*?>')
return re.sub(pattern, '', raw_html)
def censure(value, length=3):
"""Censor part of the string with asterisks"""
if not value:
return value
return value[:-length] + '*' * length
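# Illustrative: censure('secrettoken') returns 'secretto***'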
def run_threaded(non_blocking, target_func, *args, **kwargs):
"""Call a function in a thread, when specified"""
if not non_blocking:
return target_func(*args, **kwargs)
from threading import Thread
Thread(target=target_func, args=args, kwargs=kwargs).start()
return None
class CmpVersion:
"""Comparator for version numbers"""
def __init__(self, version):
self.version = version
def __str__(self):
return self.version
def __repr__(self):
return self.version
def __lt__(self, other):
"""Operator <"""
return operator.lt(*zip(*map(lambda x, y: (x or 0, y or 0),
map(int, self.version.split('.')),
map(int, other.split('.')))))
def __le__(self, other):
"""Operator <="""
return operator.le(*zip(*map(lambda x, y: (x or 0, y or 0),
map(int, self.version.split('.')),
map(int, other.split('.')))))
def __gt__(self, other):
"""Operator >"""
return operator.gt(*zip(*map(lambda x, y: (x or 0, y or 0),
map(int, self.version.split('.')),
map(int, other.split('.')))))
def __ge__(self, other):
"""Operator >="""
return operator.ge(*zip(*map(lambda x, y: (x or 0, y or 0),
map(int, self.version.split('.')),
map(int, other.split('.')))))
def __eq__(self, other):
"""Operator =="""
return operator.eq(*zip(*map(lambda x, y: (x or 0, y or 0),
map(int, self.version.split('.')),
map(int, other.split('.')))))
def __ne__(self, other):
"""Operator !="""
return operator.ne(*zip(*map(lambda x, y: (x or 0, y or 0),
map(int, self.version.split('.')),
map(int, other.split('.')))))
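# Illustrative (a sketch; assumes both version strings have the same number of dot-separated components):
#   CmpVersion('1.2.3') > '1.2.0' evaluates to True, and CmpVersion('1.2.3') == '1.2.3' evaluates to True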
|
Spec.py
|
# -*- coding: utf-8 -*-
import glob
import io
import sys
import json
import os
from collections import defaultdict, OrderedDict
from datetime import datetime
from . import biblio
from . import boilerplate
from . import caniuse
from . import mdnspeclinks
from . import config
from . import constants
from . import datablocks
from . import extensions
from . import fingerprinting
from . import headings
from . import highlight
from . import html
from . import idl
from . import includes
from . import inlineTags
from . import lint
from . import markdown
from . import metadata
from . import shorthands
from . import wpt
from .html import *
from .InputSource import InputSource, FileInputSource, StdinInputSource
from .Line import Line
from .messages import *
from .refs import ReferenceManager
from .unsortedJunk import *
class Spec(object):
def __init__(self, inputFilename, debug=False, token=None, lineNumbers=False, fileRequester=None, testing=False):
self.valid = False
self.lineNumbers = lineNumbers
if lineNumbers:
# line-numbers are too hacky, so force this to be a dry run
constants.dryRun = True
if inputFilename is None:
inputFilename = findImplicitInputFile()
if inputFilename is None: # still
die("No input file specified, and no *.bs or *.src.html files found in current directory.\nPlease specify an input file, or use - to pipe from STDIN.")
return
self.inputSource = InputSource(inputFilename)
self.transitiveDependencies = set()
self.debug = debug
self.token = token
self.testing = testing
if fileRequester is None:
self.dataFile = config.defaultRequester
else:
self.dataFile = fileRequester
self.valid = self.initializeState()
def initializeState(self):
self.normativeRefs = {}
self.informativeRefs = {}
self.refs = ReferenceManager(fileRequester=self.dataFile, testing=self.testing)
self.externalRefsUsed = defaultdict(lambda:defaultdict(dict))
self.md = None
self.mdBaseline = metadata.MetadataManager()
self.mdDocument = None
self.mdCommandLine = metadata.MetadataManager()
self.mdDefaults = None
self.mdOverridingDefaults = None
self.biblios = {}
self.typeExpansions = {}
        self.macros = defaultdict(lambda: "???")
self.canIUse = {}
self.mdnSpecLinks = {}
self.widl = idl.getParser()
self.testSuites = json.loads(self.dataFile.fetch("test-suites.json", str=True))
self.languages = json.loads(self.dataFile.fetch("languages.json", str=True))
self.extraStyles = defaultdict(str)
self.extraStyles['style-md-lists'] = styleMdLists
self.extraStyles['style-autolinks'] = styleAutolinks
self.extraStyles['style-selflinks'] = styleSelflinks
self.extraStyles['style-counters'] = styleCounters
self.extraScripts = defaultdict(str)
try:
inputContent = self.inputSource.read()
self.lines = inputContent.lines
if inputContent.date is not None:
self.mdBaseline.addParsedData("Date", inputContent.date)
except OSError:
die("Couldn't find the input file at the specified location '{0}'.", self.inputSource)
return False
except IOError:
die("Couldn't open the input file '{0}'.", self.inputSource)
return False
return True
def recordDependencies(self, *inputSources):
self.transitiveDependencies.update(inputSources)
def preprocess(self):
self.transitiveDependencies.clear()
self.assembleDocument()
self.processDocument()
def assembleDocument(self):
# Textual hacks
stripBOM(self)
if self.lineNumbers:
self.lines = hackyLineNumbers(self.lines)
self.lines = markdown.stripComments(self.lines)
self.recordDependencies(self.inputSource)
# Extract and process metadata
self.lines, self.mdDocument = metadata.parse(lines=self.lines)
# First load the metadata sources from 'local' data
self.md = metadata.join(self.mdBaseline, self.mdDocument, self.mdCommandLine)
# Using that to determine the Group and Status, load the correct defaults.include boilerplate
self.mdDefaults = metadata.fromJson(data=config.retrieveBoilerplateFile(self, 'defaults', error=True), source="defaults")
self.md = metadata.join(self.mdBaseline, self.mdDefaults, self.mdDocument, self.mdCommandLine)
# Using all of that, load up the text macros so I can sub them into the computed-metadata file.
self.md.fillTextMacros(self.macros, doc=self)
jsonEscapedMacros = {k: json.dumps(v)[1:-1] for k,v in self.macros.items()}
computedMdText = replaceMacros(config.retrieveBoilerplateFile(self, 'computed-metadata', error=True), macros=jsonEscapedMacros)
self.mdOverridingDefaults = metadata.fromJson(data=computedMdText, source="computed-metadata")
self.md = metadata.join(self.mdBaseline, self.mdDefaults, self.mdOverridingDefaults, self.mdDocument, self.mdCommandLine)
# Finally, compute the "implicit" things.
self.md.computeImplicitMetadata(doc=self)
# And compute macros again, in case the preceding steps changed them.
self.md.fillTextMacros(self.macros, doc=self)
self.md.validate()
extensions.load(self)
# Initialize things
self.refs.initializeRefs(self)
self.refs.initializeBiblio()
# Deal with further <pre> blocks, and markdown
self.lines = datablocks.transformDataBlocks(self, self.lines)
self.lines = markdown.parse(self.lines, self.md.indent, opaqueElements=self.md.opaqueElements, blockElements=self.md.blockElements)
# Note that, currently, markdown.parse returns an array of strings, not of Line objects.
self.refs.setSpecData(self.md)
# Convert to a single string of html now, for convenience.
self.html = ''.join(l.text for l in self.lines)
boilerplate.addHeaderFooter(self)
self.html = self.fixText(self.html)
# Build the document
self.document = parseDocument(self.html)
self.head = find("head", self)
self.body = find("body", self)
correctH1(self)
includes.processInclusions(self)
metadata.parseDoc(self)
def processDocument(self):
# Fill in and clean up a bunch of data
self.fillContainers = locateFillContainers(self)
lint.exampleIDs(self)
boilerplate.addBikeshedVersion(self)
boilerplate.addCanonicalURL(self)
boilerplate.addFavicon(self)
boilerplate.addSpecVersion(self)
boilerplate.addStatusSection(self)
boilerplate.addLogo(self)
boilerplate.addCopyright(self)
boilerplate.addSpecMetadataSection(self)
boilerplate.addAbstract(self)
boilerplate.addExpiryNotice(self)
boilerplate.addObsoletionNotice(self)
boilerplate.addAtRisk(self)
addNoteHeaders(self)
boilerplate.removeUnwantedBoilerplate(self)
shorthands.run(self)
inlineTags.processTags(self)
canonicalizeShortcuts(self)
addImplicitAlgorithms(self)
fixManualDefTables(self)
headings.processHeadings(self)
checkVarHygiene(self)
processIssuesAndExamples(self)
idl.markupIDL(self)
inlineRemoteIssues(self)
wpt.processWptElements(self)
# Handle all the links
processBiblioLinks(self)
processDfns(self)
idl.processIDL(self)
fillAttributeInfoSpans(self)
formatArgumentdefTables(self)
formatElementdefTables(self)
processAutolinks(self)
biblio.dedupBiblioReferences(self)
verifyUsageOfAllLocalBiblios(self)
caniuse.addCanIUsePanels(self)
mdnspeclinks.addMdnPanels(self)
boilerplate.addIndexSection(self)
boilerplate.addExplicitIndexes(self)
boilerplate.addStyles(self)
boilerplate.addReferencesSection(self)
boilerplate.addPropertyIndex(self)
boilerplate.addIDLSection(self)
boilerplate.addIssuesSection(self)
boilerplate.addCustomBoilerplate(self)
headings.processHeadings(self, "all") # again
boilerplate.removeUnwantedBoilerplate(self)
boilerplate.addTOCSection(self)
addSelfLinks(self)
processAutolinks(self)
boilerplate.addAnnotations(self)
boilerplate.removeUnwantedBoilerplate(self)
highlight.addSyntaxHighlighting(self)
boilerplate.addBikeshedBoilerplate(self)
fingerprinting.addTrackingVector(self)
fixIntraDocumentReferences(self)
fixInterDocumentReferences(self)
removeMultipleLinks(self)
forceCrossorigin(self)
lint.brokenLinks(self)
lint.accidental2119(self)
lint.missingExposed(self)
lint.requiredIDs(self)
lint.unusedInternalDfns(self)
# Any final HTML cleanups
cleanupHTML(self)
if self.md.prepTR:
# Don't try and override the W3C's icon.
for el in findAll("[rel ~= 'icon']", self):
removeNode(el)
# Make sure the W3C stylesheet is after all other styles.
for el in findAll("link", self):
if el.get("href").startswith("https://www.w3.org/StyleSheets/TR"):
appendChild(find("head", self), el)
# Ensure that all W3C links are https.
for el in findAll("a", self):
href = el.get("href", "")
if href.startswith("http://www.w3.org") or href.startswith("http://lists.w3.org"):
el.set("href", "https" + href[4:])
text = el.text or ""
if text.startswith("http://www.w3.org") or text.startswith("http://lists.w3.org"):
el.text = "https" + text[4:]
extensions.BSPrepTR(self)
return self
def serialize(self):
try:
rendered = html.Serializer(self.md.opaqueElements, self.md.blockElements).serialize(self.document)
        except Exception as e:
            die("{0}", e)
rendered = finalHackyCleanup(rendered)
return rendered
def fixMissingOutputFilename(self, outputFilename):
if outputFilename is None:
# More sensible defaults!
if not isinstance(self.inputSource, FileInputSource):
outputFilename = "-"
elif self.inputSource.sourceName.endswith(".bs"):
outputFilename = self.inputSource.sourceName[0:-3] + ".html"
elif self.inputSource.sourceName.endswith(".src.html"):
outputFilename = self.inputSource.sourceName[0:-9] + ".html"
else:
outputFilename = "-"
return outputFilename
def finish(self, outputFilename=None):
self.printResultMessage()
outputFilename = self.fixMissingOutputFilename(outputFilename)
rendered = self.serialize()
if not constants.dryRun:
try:
if outputFilename == "-":
sys.stdout.write(rendered)
else:
with io.open(outputFilename, "w", encoding="utf-8") as f:
f.write(rendered)
except Exception as e:
die("Something prevented me from saving the output document to {0}:\n{1}", outputFilename, e)
def printResultMessage(self):
# If I reach this point, I've succeeded, but maybe with reservations.
fatals = messageCounts['fatal']
links = messageCounts['linkerror']
warnings = messageCounts['warning']
if self.lineNumbers:
warn("Because --line-numbers was used, no output was saved.")
if fatals:
success("Successfully generated, but fatal errors were suppressed")
return
if links:
success("Successfully generated, with {0} linking errors", links)
return
if warnings:
success("Successfully generated, with warnings")
return
def watch(self, outputFilename, port=None, localhost=False):
import time
outputFilename = self.fixMissingOutputFilename(outputFilename)
if self.inputSource.mtime() is None:
die("Watch mode doesn't support {}".format(self.inputSource))
if outputFilename == "-":
die("Watch mode doesn't support streaming to STDOUT.")
return
if port:
# Serve the folder on an HTTP server
import http.server
import socketserver
import threading
class SilentServer(http.server.SimpleHTTPRequestHandler):
def log_message(*args):
pass
socketserver.TCPServer.allow_reuse_address = True
server = socketserver.TCPServer(
("localhost" if localhost else "", port), SilentServer)
print("Serving at port {0}".format(port))
thread = threading.Thread(target = server.serve_forever)
thread.daemon = True
thread.start()
else:
server = None
mdCommandLine = self.mdCommandLine
try:
self.preprocess()
self.finish(outputFilename)
lastInputModified = {dep: dep.mtime()
for dep in self.transitiveDependencies}
p("==============DONE==============")
try:
while True:
# Comparing mtimes with "!=" handles when a file starts or
# stops existing, and it's fine to rebuild if an mtime
# somehow gets older.
if any(input.mtime() != lastModified for input, lastModified in lastInputModified.items()):
resetSeenMessages()
p("Source file modified. Rebuilding...")
self.initializeState()
self.mdCommandLine = mdCommandLine
self.preprocess()
self.finish(outputFilename)
lastInputModified = {dep: dep.mtime()
for dep in self.transitiveDependencies}
p("==============DONE==============")
time.sleep(1)
except KeyboardInterrupt:
p("Exiting~")
if server:
server.shutdown()
thread.join()
sys.exit(0)
except Exception as e:
die("Something went wrong while watching the file:\n{0}", e)
def fixText(self, text, moreMacros={}):
# Do several textual replacements that need to happen *before* the document is parsed as HTML.
# If markdown shorthands are on, remove all `foo`s while processing,
# so their contents don't accidentally trigger other stuff.
# Also handle markdown escapes.
if "markdown" in self.md.markupShorthands:
textFunctor = MarkdownCodeSpans(text)
else:
textFunctor = func.Functor(text)
macros = dict(self.macros, **moreMacros)
textFunctor = textFunctor.map(curry(replaceMacros, macros=macros))
textFunctor = textFunctor.map(fixTypography)
if "css" in self.md.markupShorthands:
textFunctor = textFunctor.map(replaceAwkwardCSSShorthands)
return textFunctor.extract()
def printTargets(self):
p("Exported terms:")
for el in findAll("[data-export]", self):
for term in config.linkTextsFromElement(el):
p(" " + term)
p("Unexported terms:")
for el in findAll("[data-noexport]", self):
for term in config.linkTextsFromElement(el):
p(" " + term)
def isOpaqueElement(self, el):
if el.tag in self.md.opaqueElements:
return True
if el.get("data-opaque") is not None:
return True
return False
def findImplicitInputFile():
'''
Find what input file the user *probably* wants to use,
by scanning the current folder.
In preference order:
1. index.bs
2. Overview.bs
3. the first file with a .bs extension
4. the first file with a .src.html extension
'''
import glob
import os
if os.path.isfile("index.bs"):
return "index.bs"
if os.path.isfile("Overview.bs"):
return "Overview.bs"
allBs = glob.glob("*.bs")
if allBs:
return allBs[0]
allHtml = glob.glob("*.src.html")
if allHtml:
return allHtml[0]
return None
constants.specClass = Spec
styleMdLists = '''
/* This is a weird hack for me not yet following the commonmark spec
regarding paragraph and lists. */
[data-md] > :first-child {
margin-top: 0;
}
[data-md] > :last-child {
margin-bottom: 0;
}'''
styleAutolinks = '''
.css.css, .property.property, .descriptor.descriptor {
color: #005a9c;
font-size: inherit;
font-family: inherit;
}
.css::before, .property::before, .descriptor::before {
content: "‘";
}
.css::after, .property::after, .descriptor::after {
content: "’";
}
.property, .descriptor {
/* Don't wrap property and descriptor names */
white-space: nowrap;
}
.type { /* CSS value <type> */
font-style: italic;
}
pre .property::before, pre .property::after {
content: "";
}
[data-link-type="property"]::before,
[data-link-type="propdesc"]::before,
[data-link-type="descriptor"]::before,
[data-link-type="value"]::before,
[data-link-type="function"]::before,
[data-link-type="at-rule"]::before,
[data-link-type="selector"]::before,
[data-link-type="maybe"]::before {
content: "‘";
}
[data-link-type="property"]::after,
[data-link-type="propdesc"]::after,
[data-link-type="descriptor"]::after,
[data-link-type="value"]::after,
[data-link-type="function"]::after,
[data-link-type="at-rule"]::after,
[data-link-type="selector"]::after,
[data-link-type="maybe"]::after {
content: "’";
}
[data-link-type].production::before,
[data-link-type].production::after,
.prod [data-link-type]::before,
.prod [data-link-type]::after {
content: "";
}
[data-link-type=element],
[data-link-type=element-attr] {
font-family: Menlo, Consolas, "DejaVu Sans Mono", monospace;
font-size: .9em;
}
[data-link-type=element]::before { content: "<" }
[data-link-type=element]::after { content: ">" }
[data-link-type=biblio] {
white-space: pre;
}'''
styleSelflinks = '''
.heading, .issue, .note, .example, li, dt {
position: relative;
}
a.self-link {
position: absolute;
top: 0;
left: calc(-1 * (3.5rem - 26px));
width: calc(3.5rem - 26px);
height: 2em;
text-align: center;
border: none;
transition: opacity .2s;
opacity: .5;
}
a.self-link:hover {
opacity: 1;
}
.heading > a.self-link {
font-size: 83%;
}
li > a.self-link {
left: calc(-1 * (3.5rem - 26px) - 2em);
}
dfn > a.self-link {
top: auto;
left: auto;
opacity: 0;
width: 1.5em;
height: 1.5em;
background: gray;
color: white;
font-style: normal;
transition: opacity .2s, background-color .2s, color .2s;
}
dfn:hover > a.self-link {
opacity: 1;
}
dfn > a.self-link:hover {
color: black;
}
a.self-link::before { content: "¶"; }
.heading > a.self-link::before { content: "§"; }
dfn > a.self-link::before { content: "#"; }'''
styleCounters = '''
body {
counter-reset: example figure issue;
}
.issue {
counter-increment: issue;
}
.issue:not(.no-marker)::before {
content: "Issue " counter(issue);
}
.example {
counter-increment: example;
}
.example:not(.no-marker)::before {
content: "Example " counter(example);
}
.invalid.example:not(.no-marker)::before,
.illegal.example:not(.no-marker)::before {
content: "Invalid Example" counter(example);
}
figcaption {
counter-increment: figure;
}
figcaption:not(.no-marker)::before {
content: "Figure " counter(figure) " ";
}'''
|
EnigmaStateManager.py
|
import threading
import time
from queue import Queue
class EnigmaStateManager:
def __init__(self,num_worker_threads=5):
self.machineStateTable={}
self.workRequestMap={}
self.workQueue=Queue()
self.num_worker_threads=num_worker_threads
self.finished=False
self.run()
def processWorkQueue(self):
print("checking queue ")
while not self.finished:
workRequest=self.workQueue.get()
self.generateState(workRequest)
self.workQueue.task_done()
time.sleep(0.5)
def generateState(self,workRequest):
mc=workRequest["MC"]
mcId=workRequest["MCID"]
inputBlk=range(mc.getCipherRotorsSize())
print("processing work request")
for i in range(workRequest["STCount"]):
outputBlk=mc.processKeyListPress(inputBlk)
stateMap=self.createStateMap(inputBlk,outputBlk)
self.addMachineStateToTable(mcId,i,stateMap)
def createStateMap(self,inputSeq,outputSeq):
result={}
for i in range(len(inputSeq)):
result[i]=outputSeq[i]
return result
def run(self):
for i in range(self.num_worker_threads):
t = threading.Thread(target=self.processWorkQueue)
t.daemon = True
t.start()
    def retreiveMachineState(self,machineId,stateNumber):
        # The worker threads store states in a nested dict keyed by machine id, then state number
        if machineId in self.machineStateTable and stateNumber in self.machineStateTable[machineId]:
            return self.machineStateTable[machineId][stateNumber]
        else:
            # Request already in the queue; wait until the worker threads have produced the state
            if machineId in self.workRequestMap and stateNumber <= self.workRequestMap[machineId]:
                while machineId not in self.machineStateTable or stateNumber not in self.machineStateTable[machineId]:
                    time.sleep(0.1)
                return self.machineStateTable[machineId][stateNumber]
            else:
                raise LookupError("No work request in the work queue for this machine state")
def generateMachineState(self,machineId,machine,generatedStepsCount):
request={}
request["MCID"]=machineId
request["STCount"]=generatedStepsCount
request["MC"]=machine
self.workQueue.put(request)
if machineId in self.workRequestMap:
lastStep=self.workRequestMap[machineId]
lastStep+=generatedStepsCount
self.workRequestMap[machineId]=lastStep
else:
self.workRequestMap[machineId]=generatedStepsCount
def addMachineStateToTable(self,machineId,stateNumber,state):
if machineId not in self.machineStateTable:
self.machineStateTable[machineId]={}
entry=self.machineStateTable[machineId]
entry[stateNumber]=state
def getEntryId(self,machineId,stateNumber):
return str(machineId)+"|"+str(stateNumber)
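# Illustrative usage sketch (the `machine` object is an assumption; as used above it must provide
# getCipherRotorsSize() and processKeyListPress()):
#   esm = EnigmaStateManager(num_worker_threads=2)
#   esm.generateMachineState("mc-1", machine, 100)
#   state_map = esm.retreiveMachineState("mc-1", 0)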
|
fsspec_utils.py
|
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
from threading import Thread
import numpy as np
from pyarrow import parquet as pq
try:
import cudf
from cudf.core.column import as_column, build_categorical_column
except ImportError:
cudf = None
#
# Parquet-Specific Utilities
#
def _optimized_read_partition_remote(
fs, pieces, columns, index, categories=(), partitions=(), **kwargs
):
# This is a specialized version of `CudfEngine.read_partition`
# for remote filesystems. This implementation is intended to
# replace the upstream `read_partition` classmethod until
# remote-filesystem handling is optimized in cudf/dask-cudf
if columns is not None:
columns = list(columns)
if isinstance(index, list):
columns += index
# Check that this is a single-piece read on a non-local filesystem
if not isinstance(pieces, list):
pieces = [pieces]
if len(pieces) > 1:
raise ValueError(
"The `_custom_read_partition` code path is not designed to "
"handle a multi-element `pieces` argument."
)
if cudf.utils.ioutils._is_local_filesystem(fs):
raise ValueError(
"The `_custom_read_partition` code path is not intended "
"for use on local filesystems."
)
# Unpack contents of the single piece
if isinstance(pieces[0], str):
path = pieces[0]
row_group = None
partition_keys = []
else:
(path, row_group, partition_keys) = pieces[0]
# Call optimized read utility
df = _optimized_read_remote(path, row_group, columns, fs, **kwargs)
#
# Code below is directly copied from cudf-21.08
#
if index and (index[0] in df.columns):
df = df.set_index(index[0])
elif index is False and set(df.index.names).issubset(columns):
# If index=False, we need to make sure all of the
# names in `columns` are actually in `df.columns`
df.reset_index(inplace=True)
if partition_keys:
if partitions is None:
raise ValueError("Must pass partition sets")
for i, (name, index2) in enumerate(partition_keys):
categories = [val.as_py() for val in partitions.levels[i].dictionary]
col = as_column(index2).as_frame().repeat(len(df))._data[None]
df[name] = build_categorical_column(
categories=categories,
codes=as_column(col.base_data, dtype=col.dtype),
size=col.size,
offset=col.offset,
ordered=False,
)
return df
def _optimized_read_remote(path, row_groups, columns, fs, **kwargs):
if row_groups is not None and not isinstance(row_groups, list):
row_groups = [row_groups]
# Get byte-ranges that are known to contain the
# required data for this read
byte_ranges, footer, file_size = _get_parquet_byte_ranges(
path, row_groups, columns, fs, **kwargs
)
# Call cudf.read_parquet on the dummy buffer
strings_to_cats = kwargs.get("strings_to_categorical", False)
return cudf.read_parquet(
# Wrap in BytesIO since cudf will sometimes use
# pyarrow to parse the metadata (and pyarrow
# cannot read from a bytes object)
io.BytesIO(
# Transfer the required bytes with fsspec
_fsspec_data_transfer(
path,
fs,
byte_ranges=byte_ranges,
footer=footer,
file_size=file_size,
add_par1_magic=True,
**kwargs,
)
),
engine="cudf",
columns=columns,
row_groups=row_groups,
strings_to_categorical=strings_to_cats,
**kwargs.get("read", {}),
)
def _get_parquet_byte_ranges(
path,
rgs,
columns,
fs,
bytes_per_thread=256_000_000,
**kwargs,
):
# The purpose of this utility is to return a list
# of byte ranges (in path) that are known to contain
# the data needed to read `columns` and `rgs`
# Step 0 - Get size of file
file_size = fs.size(path)
# Return early if the file is too small to merit
# optimized data transfer
if file_size <= bytes_per_thread:
return None, None, file_size
# Step 1 - Get 32 KB from tail of file.
#
# This "sample size" can be tunable, but should
# always be >= 8 bytes (so we can read the footer size)
tail_size = 32_000
footer_sample = fs.tail(path, tail_size)
# Step 2 - Read the footer size and re-read a larger
# tail if necessary
footer_size = int.from_bytes(footer_sample[-8:-4], "little")
if tail_size < (footer_size + 8):
footer_sample = fs.tail(path, footer_size + 8)
# Step 3 - Collect required byte ranges
byte_ranges = []
md = pq.ParquetFile(io.BytesIO(footer_sample)).metadata
for r in range(md.num_row_groups):
# Skip this row-group if we are targeting
# specific row-groups
if rgs is None or r in rgs:
row_group = md.row_group(r)
for c in range(row_group.num_columns):
column = row_group.column(c)
name = column.path_in_schema
# Skip this column if we are targeting
# specific columns, and this name is not
# in the list.
#
# Note that `column.path_in_schema` may
# modify the column name for list and struct
# columns. For example, a column named "a"
# may become "a.list.element"
split_name = name.split(".")[0]
if columns is None or name in columns or split_name in columns:
file_offset0 = column.dictionary_page_offset
if file_offset0 is None:
file_offset0 = column.data_page_offset
num_bytes = column.total_compressed_size
byte_ranges.append((file_offset0, num_bytes))
return byte_ranges, footer_sample, file_size
#
# General Fsspec Data-transfer Optimization Code
#
def _fsspec_data_transfer(
path_or_fob,
fs,
byte_ranges=None,
footer=None,
file_size=None,
add_par1_magic=None,
bytes_per_thread=256_000_000,
max_gap=64_000,
mode="rb",
**kwargs,
):
# Calculate total file size
file_size = file_size or fs.size(path_or_fob)
# Check if a direct read makes the most sense
if not byte_ranges and bytes_per_thread >= file_size:
return fs.open(path_or_fob, mode=mode, cache_type="none").read()
# Threaded read into "dummy" buffer
buf = np.zeros(file_size, dtype="b")
if byte_ranges:
# Optimize/merge the ranges
byte_ranges = _merge_ranges(
byte_ranges,
max_block=bytes_per_thread,
max_gap=max_gap,
)
# Call multi-threaded data transfer of
# remote byte-ranges to local buffer
_read_byte_ranges(
path_or_fob,
byte_ranges,
buf,
fs,
**kwargs,
)
# Add Header & Footer bytes
if footer is not None:
footer_size = len(footer)
buf[-footer_size:] = np.frombuffer(footer[-footer_size:], dtype="b")
# Add parquet magic bytes (optional)
if add_par1_magic:
buf[:4] = np.frombuffer(b"PAR1", dtype="b")
if footer is None:
buf[-4:] = np.frombuffer(b"PAR1", dtype="b")
else:
byte_ranges = [
(b, min(bytes_per_thread, file_size - b)) for b in range(0, file_size, bytes_per_thread)
]
_read_byte_ranges(
path_or_fob,
byte_ranges,
buf,
fs,
**kwargs,
)
return buf.tobytes()
def _merge_ranges(byte_ranges, max_block=256_000_000, max_gap=64_000):
# Simple utility to merge small/adjacent byte ranges
new_ranges = []
if not byte_ranges:
# Early return
return new_ranges
offset, size = byte_ranges[0]
for (new_offset, new_size) in byte_ranges[1:]:
gap = new_offset - (offset + size)
if gap > max_gap or (size + new_size + gap) > max_block:
# Gap is too large or total read is too large
new_ranges.append((offset, size))
offset = new_offset
size = new_size
continue
size += new_size + gap
new_ranges.append((offset, size))
return new_ranges
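# Illustrative (assumed ranges): _merge_ranges([(0, 100), (150, 100)]) returns [(0, 250)],
# since the 50-byte gap is below the default max_gap of 64_000.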
def _assign_block(fs, path_or_fob, local_buffer, offset, nbytes):
with fs.open(path_or_fob, mode="rb", cache_type="none") as fob:
fob.seek(offset)
local_buffer[offset : offset + nbytes] = np.frombuffer(
fob.read(nbytes),
dtype="b",
)
def _read_byte_ranges(
path_or_fob,
ranges,
local_buffer,
fs,
**kwargs,
):
workers = []
for (offset, nbytes) in ranges:
if len(ranges) > 1:
workers.append(
Thread(target=_assign_block, args=(fs, path_or_fob, local_buffer, offset, nbytes))
)
workers[-1].start()
else:
_assign_block(fs, path_or_fob, local_buffer, offset, nbytes)
for worker in workers:
worker.join()
|
cache.py
|
# ██╗ ██╗██████╗ ███╗ ███╗███████╗ █████╗ ██╗
# ██║ ██║██╔══██╗████╗ ████║██╔════╝██╔══██╗██║
# ███████║██║ ██║██╔████╔██║█████╗ ███████║██║
# ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ ██╔══██║██║
# ██║ ██║██████╔╝██║ ╚═╝ ██║███████╗██║ ██║███████╗
# ╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚══════╝
# Copyright 2019, Hyungyo Seo
# modules/cache.py - Script that manages the cache.
import datetime
import json
import os
from collections import OrderedDict
from threading import Thread
from modules import TTParser, getData
from modules import log
# Purge the cache
def purge(req_id, debugging):
dict_data = OrderedDict()
try:
file_list = [file for file in os.listdir("data/cache/") if file.endswith(".json")]
for file in file_list:
os.remove("data/cache/" + file)
    except Exception as error:
        log.err("[#%s] purge@modules/cache.py: Failed" % req_id)
        if debugging:
            dict_data["status"] = error
        else:
            dict_data["status"] = "Error"
        return dict_data
dict_data["status"] = "OK"
log.info("[#%s] purge@modules/cache.py: Succeeded" % req_id)
return dict_data
# Get cache information
def get(req_id, debugging):
filenames = os.listdir('data/cache/')
return_data = str()
for filename in filenames:
ext = os.path.splitext(filename)[-1]
        # Hide the timetable, water temperature, and weather cache files
if ext == '.json' and filename != "TT.json" and filename != "wtemp.json" and filename != "weather.json":
if debugging:
print(filename)
return_data = "%s\n%s" % (return_data, filename.replace(".json", ""))
    # Check timetable cache expiry
if "TT.json" in filenames:
        with open('data/cache/TT.json', encoding="utf-8") as data:  # Read the cache
timestamp = datetime.datetime.fromtimestamp(json.load(data)["Timestamp"])
        if (datetime.datetime.now() - timestamp) < datetime.timedelta(hours=3):  # Check whether the cache has expired
time_left = int((datetime.timedelta(hours=3) - (datetime.datetime.now() - timestamp)).seconds / 60)
return_data = "%s\n시간표 캐시 만료까지 %s분 남음" % (return_data, time_left)
else:
return_data = "%s\n시간표 캐시 만료됨" % return_data
    # Check Han River water temperature cache expiry
if "wtemp.json" in filenames:
        with open('data/cache/wtemp.json', encoding="utf-8") as data:  # Read the cache
timestamp = datetime.datetime.fromtimestamp(json.load(data)["timestamp"])
        if (datetime.datetime.now() - timestamp) < datetime.timedelta(minutes=76):  # Check whether the cache has expired
time_left = int((datetime.timedelta(minutes=76) - (datetime.datetime.now() - timestamp)).seconds / 60)
return_data = "%s\n한강 수온 캐시 만료까지 %s분 남음" % (return_data, time_left)
else:
return_data = "%s\n한강 수온 캐시 만료됨" % return_data
    # Check weather cache expiry
if "weather.json" in filenames:
        with open('data/cache/weather.json', encoding="utf-8") as data:  # Read the cache
timestamp = datetime.datetime.fromtimestamp(json.load(data)["Timestamp"])
        if (datetime.datetime.now() - timestamp) < datetime.timedelta(hours=1):  # Check whether the cache has expired
time_left = int((datetime.timedelta(hours=1) - (datetime.datetime.now() - timestamp)).seconds / 60)
return_data = "%s\n날씨 캐시 만료까지 %s분 남음" % (return_data, time_left)
else:
return_data = "%s\n날씨 캐시 만료됨" % return_data
log.info("[#%s] get@modules/cache.py: Succeeded" % req_id)
return return_data
def health_check(req_id, debugging):
global status_tt, status_wtemp, status_weather
filenames = os.listdir('data/cache/')
now = datetime.datetime.now()
    # Check timetable cache expiry
def check_tt():
global status_tt
if "TT.json" in filenames:
            with open('data/cache/TT.json', encoding="utf-8") as data:  # Read the cache
timestamp = datetime.datetime.fromtimestamp(json.load(data)["Timestamp"])
            if (datetime.datetime.now() - timestamp) < datetime.timedelta(hours=3):  # Check whether the cache has expired
time_left = int((datetime.timedelta(hours=3) - (datetime.datetime.now() - timestamp)).seconds / 60)
status_tt = "Vaild (Up to %s Min(s))" % time_left
else:
TTParser.parse(1, 1, now.year, now.month, now.day, req_id, debugging)
status_tt = "Expired (But Now Created)"
else:
TTParser.parse(1, 1, now.year, now.month, now.day, req_id, debugging)
status_tt = "NotFound (But Now Created)"
    # Check Han River water temperature cache expiry
def check_wtemp():
global status_wtemp
if "wtemp.json" in filenames:
            with open('data/cache/wtemp.json', encoding="utf-8") as data:  # Read the cache
timestamp = datetime.datetime.fromtimestamp(json.load(data)["timestamp"])
            if (datetime.datetime.now() - timestamp) < datetime.timedelta(minutes=76):  # Check whether the cache has expired
time_left = int(
(datetime.timedelta(minutes=76) - (datetime.datetime.now() - timestamp)).seconds / 60)
status_wtemp = "Vaild (Up to %s Min(s))" % time_left
else:
getData.wtemp(req_id, debugging)
status_wtemp = "Expired (But Now Created)"
else:
getData.wtemp(req_id, debugging)
status_wtemp = "NotFound (But Now Created)"
    # Check weather cache expiry
def check_weather():
global status_weather
if "weather.json" in filenames:
            with open('data/cache/weather.json', encoding="utf-8") as data:  # Read the cache
timestamp = datetime.datetime.fromtimestamp(json.load(data)["Timestamp"])
            if (datetime.datetime.now() - timestamp) < datetime.timedelta(hours=1):  # Check whether the cache has expired
time_left = int((datetime.timedelta(hours=1) - (datetime.datetime.now() - timestamp)).seconds / 60)
status_weather = "Vaild (Up to %s Min(s))" % time_left
else:
getData.weather(None, req_id, debugging)
status_weather = "Expired (But Now Created)"
else:
getData.weather(None, req_id, debugging)
status_weather = "NotFound (But Now Created)"
    # Define the threads
    th_tt = Thread(target=check_tt)
    th_wtemp = Thread(target=check_wtemp)
    th_weather = Thread(target=check_weather)
    # Start the threads
    th_tt.start()
    th_wtemp.start()
    th_weather.start()
    # Wait until all threads have finished
    th_tt.join()
    th_wtemp.join()
    th_weather.join()
return {"Timetable": status_tt, "HanRiverTemperature": status_wtemp, "Weather": status_weather}
|
test.py
|
#!/usr/bin/env python
#
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import imp
import optparse
import os
from os.path import join, dirname, abspath, basename, isdir, exists
import platform
import re
import signal
import subprocess
import sys
import tempfile
import time
import threading
from Queue import Queue, Empty
sys.path.append(dirname(__file__) + "/../deps/v8/tools");
import utils
VERBOSE = False
# ---------------------------------------------
# --- P r o g r e s s I n d i c a t o r s ---
# ---------------------------------------------
class ProgressIndicator(object):
def __init__(self, cases):
self.cases = cases
self.queue = Queue(len(cases))
for case in cases:
self.queue.put_nowait(case)
self.succeeded = 0
self.remaining = len(cases)
self.total = len(cases)
self.failed = [ ]
self.crashed = 0
self.terminate = False
self.lock = threading.Lock()
def PrintFailureHeader(self, test):
if test.IsNegative():
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
'label': test.GetLabel(),
'negative': negative_marker
}
print "Path: %s" % "/".join(test.path)
def Run(self, tasks):
self.Starting()
threads = []
# Spawn N-1 threads and then use this thread as the last one.
# That way -j1 avoids threading altogether which is a nice fallback
# in case of threading problems.
for i in xrange(tasks - 1):
thread = threading.Thread(target=self.RunSingle, args=[])
threads.append(thread)
thread.start()
try:
self.RunSingle()
# Wait for the remaining threads
for thread in threads:
# Use a timeout so that signals (ctrl-c) will be processed.
thread.join(timeout=10000000)
except Exception, e:
# If there's an exception we schedule an interruption for any
# remaining threads.
self.terminate = True
# ...and then reraise the exception to bail out
raise
self.Done()
return not self.failed
def RunSingle(self):
while not self.terminate:
try:
test = self.queue.get_nowait()
except Empty:
return
case = test.case
self.lock.acquire()
self.AboutToRun(case)
self.lock.release()
try:
start = time.time()
output = case.Run()
case.duration = (time.time() - start)
except IOError, e:
assert self.terminate
return
if self.terminate:
return
self.lock.acquire()
if output.UnexpectedOutput():
self.failed.append(output)
if output.HasCrashed():
self.crashed += 1
else:
self.succeeded += 1
self.remaining -= 1
self.HasRun(output)
self.lock.release()
def EscapeCommand(command):
parts = []
for part in command:
if ' ' in part:
# Escape spaces. We may need to escape more characters for this
# to work properly.
parts.append('"%s"' % part)
else:
parts.append(part)
return " ".join(parts)
class SimpleProgressIndicator(ProgressIndicator):
def Starting(self):
print 'Running %i tests' % len(self.cases)
def Done(self):
print
for failed in self.failed:
self.PrintFailureHeader(failed.test)
if failed.output.stderr:
print "--- stderr ---"
print failed.output.stderr.strip()
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(failed.command)
if failed.HasCrashed():
print "--- CRASHED ---"
if failed.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.failed) == 0:
print "==="
print "=== All tests succeeded"
print "==="
else:
print
print "==="
print "=== %i tests failed" % len(self.failed)
if self.crashed > 0:
print "=== %i tests CRASHED" % self.crashed
print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
print 'Starting %s...' % case.GetLabel()
sys.stdout.flush()
def HasRun(self, output):
if output.UnexpectedOutput():
if output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
print 'Done running %s: %s' % (output.test.GetLabel(), outcome)
class DotsProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
pass
def HasRun(self, output):
total = self.succeeded + len(self.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if output.UnexpectedOutput():
if output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
elif output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
class CompactProgressIndicator(ProgressIndicator):
def __init__(self, cases, templates):
super(CompactProgressIndicator, self).__init__(cases)
self.templates = templates
self.last_status_length = 0
self.start_time = time.time()
def Starting(self):
pass
def Done(self):
self.PrintProgress('Done')
def AboutToRun(self, case):
self.PrintProgress(case.GetLabel())
def HasRun(self, output):
if output.UnexpectedOutput():
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(output.test)
stdout = output.output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
stderr = output.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
print "Command: %s" % EscapeCommand(output.command)
if output.HasCrashed():
print "--- CRASHED ---"
if output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, str, length):
if length and (len(str) > (length - 3)):
return str[:(length-3)] + "..."
else:
return str
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
status = self.templates['status_line'] % {
'passed': self.succeeded,
'remaining': (((self.total - self.remaining) * 100) // self.total),
'failed': len(self.failed),
'test': name,
'mins': int(elapsed) / 60,
'secs': int(elapsed) % 60
}
status = self.Truncate(status, 78)
self.last_status_length = len(status)
print status,
sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self, cases):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|\033[34m%%%(remaining) 4d\033[0m|\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s",
'stdout': "\033[1m%s\033[0m",
'stderr': "\033[31m%s\033[0m",
}
super(ColorProgressIndicator, self).__init__(cases, templates)
def ClearLine(self, last_line_length):
print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self, cases):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s",
'stdout': '%s',
'stderr': '%s',
'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"),
'max_length': 78
}
super(MonochromeProgressIndicator, self).__init__(cases, templates)
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
'color': ColorProgressIndicator,
'mono': MonochromeProgressIndicator
}
# -------------------------
# --- F r a m e w o r k ---
# -------------------------
class CommandOutput(object):
def __init__(self, exit_code, timed_out, stdout, stderr):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
self.stderr = stderr
self.failed = None
class TestCase(object):
def __init__(self, context, path, mode):
self.path = path
self.context = context
self.duration = None
self.mode = mode
def IsNegative(self):
return False
def CompareTime(self, other):
return cmp(other.duration, self.duration)
def DidFail(self, output):
if output.failed is None:
output.failed = self.IsFailureOutput(output)
return output.failed
def IsFailureOutput(self, output):
return output.exit_code != 0
def GetSource(self):
return "(no source available)"
def RunCommand(self, command):
full_command = self.context.processor(command)
output = Execute(full_command,
self.context,
self.context.GetTimeout(self.mode))
self.Cleanup()
return TestOutput(self,
full_command,
output,
self.context.store_unexpected_output)
def BeforeRun(self):
pass
def AfterRun(self, result):
pass
  def Run(self):
    self.BeforeRun()
    result = None
    try:
      result = self.RunCommand(self.GetCommand())
    finally:
      self.AfterRun(result)
    return result
def Cleanup(self):
return
class TestOutput(object):
def __init__(self, test, command, output, store_unexpected_output):
self.test = test
self.command = command
self.output = output
self.store_unexpected_output = store_unexpected_output
def UnexpectedOutput(self):
if self.HasCrashed():
outcome = CRASH
elif self.HasTimedOut():
outcome = TIMEOUT
elif self.HasFailed():
outcome = FAIL
else:
outcome = PASS
return not outcome in self.test.outcomes
def HasPreciousOutput(self):
return self.UnexpectedOutput() and self.store_unexpected_output
def HasCrashed(self):
if utils.IsWindows():
return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
else:
# Timed out tests will have exit_code -signal.SIGTERM.
if self.output.timed_out:
return False
return self.output.exit_code < 0 and \
self.output.exit_code != -signal.SIGABRT
def HasTimedOut(self):
return self.output.timed_out;
def HasFailed(self):
execution_failed = self.test.DidFail(self.output)
if self.test.IsNegative():
return not execution_failed
else:
return execution_failed
def KillProcessWithID(pid):
if utils.IsWindows():
os.popen('taskkill /T /F /PID %d' % pid)
else:
os.kill(pid, signal.SIGTERM)
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.0001
SLEEP_TIME_FACTOR = 1.25
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def Win32SetErrorMode(mode):
prev_error_mode = SEM_INVALID_VALUE
try:
import ctypes
prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode);
except ImportError:
pass
return prev_error_mode
def RunProcess(context, timeout, args, **rest):
if context.verbose: print "#", " ".join(args)
popen_args = args
prev_error_mode = SEM_INVALID_VALUE;
if utils.IsWindows():
if context.suppress_dialogs:
# Try to change the error mode to avoid dialogs on fatal errors. Don't
# touch any existing error mode flags by merging the existing error mode.
# See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
error_mode = SEM_NOGPFAULTERRORBOX;
prev_error_mode = Win32SetErrorMode(error_mode);
Win32SetErrorMode(error_mode | prev_error_mode);
process = subprocess.Popen(
shell = utils.IsWindows(),
args = popen_args,
**rest
)
if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE:
Win32SetErrorMode(prev_error_mode)
# Compute the end time - if the process crosses this limit we
# consider it timed out.
if timeout is None: end_time = None
else: end_time = time.time() + timeout
timed_out = False
# Repeatedly check the exit code from the process in a
# loop and keep track of whether or not it times out.
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
while exit_code is None:
if (not end_time is None) and (time.time() >= end_time):
# Kill the process and wait for it to exit.
KillProcessWithID(process.pid)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
return (process, exit_code, timed_out)
def PrintError(str):
sys.stderr.write(str)
sys.stderr.write('\n')
def CheckedUnlink(name):
try:
os.unlink(name)
except OSError, e:
PrintError("os.unlink() " + str(e))
def Execute(args, context, timeout=None):
(fd_out, outname) = tempfile.mkstemp()
(fd_err, errname) = tempfile.mkstemp()
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
stdout = fd_out,
stderr = fd_err,
)
os.close(fd_out)
os.close(fd_err)
output = file(outname).read()
errors = file(errname).read()
CheckedUnlink(outname)
CheckedUnlink(errname)
return CommandOutput(exit_code, timed_out, output, errors)
def ExecuteNoCapture(args, context, timeout=None):
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
)
return CommandOutput(exit_code, False, "", "")
def CarCdr(path):
if len(path) == 0:
return (None, [ ])
else:
return (path[0], path[1:])
class TestConfiguration(object):
def __init__(self, context, root):
self.context = context
self.root = root
def Contains(self, path, file):
if len(path) > len(file):
return False
for i in xrange(len(path)):
if not path[i].match(file[i]):
return False
return True
def GetTestStatus(self, sections, defs):
pass
class TestSuite(object):
def __init__(self, name):
self.name = name
def GetName(self):
return self.name
# Use this to run several variants of the tests, e.g.:
# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
VARIANT_FLAGS = [[]]
class TestRepository(TestSuite):
def __init__(self, path):
normalized_path = abspath(path)
super(TestRepository, self).__init__(basename(normalized_path))
self.path = normalized_path
self.is_loaded = False
self.config = None
def GetConfiguration(self, context):
if self.is_loaded:
return self.config
self.is_loaded = True
file = None
try:
(file, pathname, description) = imp.find_module('testcfg', [ self.path ])
module = imp.load_module('testcfg', file, pathname, description)
self.config = module.GetConfiguration(context, self.path)
finally:
if file:
file.close()
return self.config
def GetBuildRequirements(self, path, context):
return self.GetConfiguration(context).GetBuildRequirements()
def AddTestsToList(self, result, current_path, path, context, mode):
for v in VARIANT_FLAGS:
tests = self.GetConfiguration(context).ListTests(current_path, path, mode)
for t in tests: t.variant_flags = v
result += tests
def GetTestStatus(self, context, sections, defs):
self.GetConfiguration(context).GetTestStatus(sections, defs)
class LiteralTestSuite(TestSuite):
def __init__(self, tests):
super(LiteralTestSuite, self).__init__('root')
self.tests = tests
def GetBuildRequirements(self, path, context):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
if not name or name.match(test.GetName()):
result += test.GetBuildRequirements(rest, context)
return result
def ListTests(self, current_path, path, context, mode):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
test_name = test.GetName()
if not name or name.match(test_name):
full_path = current_path + [test_name]
test.AddTestsToList(result, full_path, path, context, mode)
return result
def GetTestStatus(self, context, sections, defs):
for test in self.tests:
test.GetTestStatus(context, sections, defs)
SUFFIX = {
'debug' : '_g',
'release' : '' }
FLAGS = {
'debug' : ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
'release' : []}
TIMEOUT_SCALEFACTOR = {
'debug' : 4,
'release' : 1 }
class Context(object):
def __init__(self, workspace, buildspace, verbose, vm, timeout, processor, suppress_dialogs, store_unexpected_output):
self.workspace = workspace
self.buildspace = buildspace
self.verbose = verbose
self.vm_root = vm
self.timeout = timeout
self.processor = processor
self.suppress_dialogs = suppress_dialogs
self.store_unexpected_output = store_unexpected_output
def GetVm(self, mode):
name = './launchtest.sh'
# Currently GYP does not support output_dir for MSVS.
# http://code.google.com/p/gyp/issues/detail?id=40
# It will put the builds into Release/node.exe or Debug/node.exe
if utils.IsWindows():
out_dir = os.path.join(dirname(__file__), "..", "out")
if not exists(out_dir):
if mode == 'debug':
name = os.path.abspath('Debug/node.exe')
else:
name = os.path.abspath('Release/node.exe')
else:
name = os.path.abspath(name + '.exe')
return name
def GetVmCommand(self, testcase, mode):
return [self.GetVm(mode)] + self.GetVmFlags(testcase, mode)
def GetVmFlags(self, testcase, mode):
return testcase.variant_flags + FLAGS[mode]
def GetTimeout(self, mode):
return self.timeout * TIMEOUT_SCALEFACTOR[mode]
def RunTestCases(cases_to_run, progress, tasks):
progress = PROGRESS_INDICATORS[progress](cases_to_run)
return progress.Run(tasks)
def BuildRequirements(context, requirements, mode, scons_flags):
command_line = (['scons', '-Y', context.workspace, 'mode=' + ",".join(mode)]
+ requirements
+ scons_flags)
output = ExecuteNoCapture(command_line, context)
return output.exit_code == 0
# -------------------------------------------
# --- T e s t C o n f i g u r a t i o n ---
# -------------------------------------------
SKIP = 'skip'
FAIL = 'fail'
PASS = 'pass'
OKAY = 'okay'
TIMEOUT = 'timeout'
CRASH = 'crash'
SLOW = 'slow'
class Expression(object):
pass
class Constant(Expression):
def __init__(self, value):
self.value = value
def Evaluate(self, env, defs):
return self.value
class Variable(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in env: return ListSet([env[self.name]])
else: return Nothing()
class Outcome(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in defs:
return defs[self.name].GetOutcomes(env, defs)
else:
return ListSet([self.name])
class Set(object):
pass
class ListSet(Set):
def __init__(self, elms):
self.elms = elms
def __str__(self):
return "ListSet%s" % str(self.elms)
def Intersect(self, that):
if not isinstance(that, ListSet):
return that.Intersect(self)
return ListSet([ x for x in self.elms if x in that.elms ])
def Union(self, that):
if not isinstance(that, ListSet):
return that.Union(self)
return ListSet(self.elms + [ x for x in that.elms if x not in self.elms ])
def IsEmpty(self):
return len(self.elms) == 0
class Everything(Set):
def Intersect(self, that):
return that
def Union(self, that):
return self
def IsEmpty(self):
return False
class Nothing(Set):
def Intersect(self, that):
return self
def Union(self, that):
return that
def IsEmpty(self):
return True
class Operation(Expression):
def __init__(self, left, op, right):
self.left = left
self.op = op
self.right = right
def Evaluate(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
elif self.op == 'if':
return False
elif self.op == '==':
inter = self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
return not inter.IsEmpty()
else:
assert self.op == '&&'
return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
def GetOutcomes(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.GetOutcomes(env, defs).Union(self.right.GetOutcomes(env, defs))
elif self.op == 'if':
if self.right.Evaluate(env, defs): return self.left.GetOutcomes(env, defs)
else: return Nothing()
else:
assert self.op == '&&'
return self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
def IsAlpha(str):
for char in str:
if not (char.isalpha() or char.isdigit() or char == '_'):
return False
return True
class Tokenizer(object):
"""A simple string tokenizer that chops expressions into variables,
parens and operators"""
def __init__(self, expr):
self.index = 0
self.expr = expr
self.length = len(expr)
self.tokens = None
def Current(self, length = 1):
if not self.HasMore(length): return ""
return self.expr[self.index:self.index+length]
def HasMore(self, length = 1):
return self.index < self.length + (length - 1)
def Advance(self, count = 1):
self.index = self.index + count
def AddToken(self, token):
self.tokens.append(token)
def SkipSpaces(self):
while self.HasMore() and self.Current().isspace():
self.Advance()
def Tokenize(self):
self.tokens = [ ]
while self.HasMore():
self.SkipSpaces()
if not self.HasMore():
return None
if self.Current() == '(':
self.AddToken('(')
self.Advance()
elif self.Current() == ')':
self.AddToken(')')
self.Advance()
elif self.Current() == '$':
self.AddToken('$')
self.Advance()
elif self.Current() == ',':
self.AddToken(',')
self.Advance()
elif IsAlpha(self.Current()):
buf = ""
while self.HasMore() and IsAlpha(self.Current()):
buf += self.Current()
self.Advance()
self.AddToken(buf)
elif self.Current(2) == '&&':
self.AddToken('&&')
self.Advance(2)
elif self.Current(2) == '||':
self.AddToken('||')
self.Advance(2)
elif self.Current(2) == '==':
self.AddToken('==')
self.Advance(2)
else:
return None
return self.tokens
class Scanner(object):
"""A simple scanner that can serve out tokens from a given list"""
def __init__(self, tokens):
self.tokens = tokens
self.length = len(tokens)
self.index = 0
def HasMore(self):
return self.index < self.length
def Current(self):
return self.tokens[self.index]
def Advance(self):
self.index = self.index + 1
def ParseAtomicExpression(scan):
if scan.Current() == "true":
scan.Advance()
return Constant(True)
elif scan.Current() == "false":
scan.Advance()
return Constant(False)
elif IsAlpha(scan.Current()):
name = scan.Current()
scan.Advance()
return Outcome(name.lower())
elif scan.Current() == '$':
scan.Advance()
if not IsAlpha(scan.Current()):
return None
name = scan.Current()
scan.Advance()
return Variable(name.lower())
elif scan.Current() == '(':
scan.Advance()
result = ParseLogicalExpression(scan)
if (not result) or (scan.Current() != ')'):
return None
scan.Advance()
return result
else:
return None
BINARIES = ['==']
def ParseOperatorExpression(scan):
left = ParseAtomicExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in BINARIES):
op = scan.Current()
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseConditionalExpression(scan):
left = ParseOperatorExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() == 'if'):
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left = Operation(left, 'if', right)
return left
LOGICALS = ["&&", "||", ","]
def ParseLogicalExpression(scan):
left = ParseConditionalExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in LOGICALS):
op = scan.Current()
scan.Advance()
right = ParseConditionalExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseCondition(expr):
"""Parses a logical expression into an Expression object"""
tokens = Tokenizer(expr).Tokenize()
if not tokens:
print "Malformed expression: '%s'" % expr
return None
scan = Scanner(tokens)
ast = ParseLogicalExpression(scan)
if not ast:
print "Malformed expression: '%s'" % expr
return None
if scan.HasMore():
print "Malformed expression: '%s'" % expr
return None
return ast
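# Illustrative examples (not taken from an actual status file) of expressions
# that ParseCondition accepts, given the tokenizer and parser above:
#   PASS || FAIL                   union of two outcomes
#   $mode == debug                 compare the 'mode' variable with an outcome
#   SKIP if $system == windows     outcome that only applies when the condition holds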
class ClassifiedTest(object):
def __init__(self, case, outcomes):
self.case = case
self.outcomes = outcomes
class Configuration(object):
"""The parsed contents of a configuration file"""
def __init__(self, sections, defs):
self.sections = sections
self.defs = defs
def ClassifyTests(self, cases, env):
sections = [s for s in self.sections if s.condition.Evaluate(env, self.defs)]
all_rules = reduce(list.__add__, [s.rules for s in sections], [])
unused_rules = set(all_rules)
result = [ ]
all_outcomes = set([])
for case in cases:
matches = [ r for r in all_rules if r.Contains(case.path) ]
outcomes = set([])
for rule in matches:
outcomes = outcomes.union(rule.GetOutcomes(env, self.defs))
unused_rules.discard(rule)
if not outcomes:
outcomes = [PASS]
case.outcomes = outcomes
all_outcomes = all_outcomes.union(outcomes)
result.append(ClassifiedTest(case, outcomes))
return (result, list(unused_rules), all_outcomes)
class Section(object):
"""A section of the configuration file. Sections are enabled or
disabled prior to running the tests, based on their conditions"""
def __init__(self, condition):
self.condition = condition
self.rules = [ ]
def AddRule(self, rule):
self.rules.append(rule)
class Rule(object):
"""A single rule that specifies the expected outcome for a single
test."""
def __init__(self, raw_path, path, value):
self.raw_path = raw_path
self.path = path
self.value = value
def GetOutcomes(self, env, defs):
set = self.value.GetOutcomes(env, defs)
assert isinstance(set, ListSet)
return set.elms
def Contains(self, path):
if len(self.path) > len(path):
return False
for i in xrange(len(self.path)):
if not self.path[i].match(path[i]):
return False
return True
HEADER_PATTERN = re.compile(r'\[([^]]+)\]')
RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')
DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')
PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w\_\.\-\/]+)$')
def ReadConfigurationInto(path, sections, defs):
current_section = Section(Constant(True))
sections.append(current_section)
prefix = []
for line in utils.ReadLinesFrom(path):
header_match = HEADER_PATTERN.match(line)
if header_match:
condition_str = header_match.group(1).strip()
condition = ParseCondition(condition_str)
new_section = Section(condition)
sections.append(new_section)
current_section = new_section
continue
rule_match = RULE_PATTERN.match(line)
if rule_match:
path = prefix + SplitPath(rule_match.group(1).strip())
value_str = rule_match.group(2).strip()
value = ParseCondition(value_str)
if not value:
return False
current_section.AddRule(Rule(rule_match.group(1), path, value))
continue
def_match = DEF_PATTERN.match(line)
if def_match:
name = def_match.group(1).lower()
value = ParseCondition(def_match.group(2).strip())
if not value:
return False
defs[name] = value
continue
prefix_match = PREFIX_PATTERN.match(line)
if prefix_match:
prefix = SplitPath(prefix_match.group(1).strip())
continue
print "Malformed line: '%s'." % line
return False
return True
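# For reference, the four kinds of lines recognized above (matching
# HEADER_PATTERN, PREFIX_PATTERN, DEF_PATTERN and RULE_PATTERN); the concrete
# names are only illustrative:
#   [ $system == windows ]            start a section guarded by a condition
#   prefix simple                     prepend a path prefix to following rules
#   def pass_or_fail = PASS || FAIL   define a named outcome expression
#   test-foo/bar* : SKIP              map a test path pattern to expected outcomes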
# ---------------
# --- M a i n ---
# ---------------
ARCH_GUESS = utils.GuessArchitecture()
def BuildOptions():
result = optparse.OptionParser()
result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)",
default='release')
result.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
result.add_option("-S", dest="scons_flags", help="Flag to pass through to scons",
default=[], action="append")
result.add_option("-p", "--progress",
help="The style of progress indicator (verbose, dots, color, mono)",
choices=PROGRESS_INDICATORS.keys(), default="mono")
result.add_option("--no-build", help="Don't build requirements",
default=True, action="store_true")
result.add_option("--build-only", help="Only build requirements, don't run the tests",
default=False, action="store_true")
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("-s", "--suite", help="A test suite",
default=[], action="append")
result.add_option("-t", "--timeout", help="Timeout in seconds",
default=60, type="int")
result.add_option("--arch", help='The architecture to run tests for',
default='none')
result.add_option("--snapshot", help="Run the tests with snapshot turned on",
default=False, action="store_true")
result.add_option("--simulator", help="Run tests with architecture simulator",
default='none')
result.add_option("--special-command", default=None)
result.add_option("--use-http1", help="Pass --use-http1 switch to node",
default=False, action="store_true")
result.add_option("--valgrind", help="Run tests through valgrind",
default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
result.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
default=1, type="int")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
dest="suppress_dialogs", default=True, action="store_true")
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
result.add_option("--shell", help="Path to V8 shell", default="shell")
result.add_option("--store-unexpected-output",
help="Store the temporary JS files from tests that fails",
dest="store_unexpected_output", default=True, action="store_true")
result.add_option("--no-store-unexpected-output",
help="Deletes the temporary JS files from tests that fails",
dest="store_unexpected_output", action="store_false")
return result
def ProcessOptions(options):
global VERBOSE
VERBOSE = options.verbose
options.mode = options.mode.split(',')
for mode in options.mode:
if mode not in ['debug', 'release']:
print "Unknown mode %s" % mode
return False
if options.simulator != 'none':
# Simulator argument was set. Make sure arch and simulator agree.
if options.simulator != options.arch:
if options.arch == 'none':
options.arch = options.simulator
else:
print "Architecture %s does not match sim %s" %(options.arch, options.simulator)
return False
# Ensure that the simulator argument is handed down to scons.
options.scons_flags.append("simulator=" + options.simulator)
else:
# If options.arch is not set by the command line and no simulator setting
# was found, set the arch to the guess.
if options.arch == 'none':
options.arch = ARCH_GUESS
options.scons_flags.append("arch=" + options.arch)
if options.snapshot:
options.scons_flags.append("snapshot=on")
return True
REPORT_TEMPLATE = """\
Total: %(total)i tests
* %(skipped)4d tests will be skipped
* %(nocrash)4d tests are expected to be flaky but not crash
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
* %(fail)4d tests are expected to fail that we should fix\
"""
def PrintReport(cases):
def IsFlaky(o):
return (PASS in o) and (FAIL in o) and (not CRASH in o) and (not OKAY in o)
def IsFailOk(o):
return (len(o) == 2) and (FAIL in o) and (OKAY in o)
unskipped = [c for c in cases if not SKIP in c.outcomes]
print REPORT_TEMPLATE % {
'total': len(cases),
'skipped': len(cases) - len(unskipped),
'nocrash': len([t for t in unskipped if IsFlaky(t.outcomes)]),
'pass': len([t for t in unskipped if list(t.outcomes) == [PASS]]),
'fail_ok': len([t for t in unskipped if IsFailOk(t.outcomes)]),
'fail': len([t for t in unskipped if list(t.outcomes) == [FAIL]])
}
class Pattern(object):
def __init__(self, pattern):
self.pattern = pattern
self.compiled = None
def match(self, str):
if not self.compiled:
pattern = "^" + self.pattern.replace('*', '.*') + "$"
self.compiled = re.compile(pattern)
return self.compiled.match(str)
def __str__(self):
return self.pattern
def SplitPath(s):
stripped = [ c.strip() for c in s.split('/') ]
return [ Pattern(s) for s in stripped if len(s) > 0 ]
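# Example (illustrative): SplitPath("simple/test-foo*") returns
# [Pattern('simple'), Pattern('test-foo*')], where each '*' matches any
# substring when the pattern is compared against a test path component.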
def GetSpecialCommandProcessor(value):
if (not value) or (value.find('@') == -1):
def ExpandCommand(args):
return args
return ExpandCommand
else:
pos = value.find('@')
import urllib
prefix = urllib.unquote(value[:pos]).split()
suffix = urllib.unquote(value[pos+1:]).split()
def ExpandCommand(args):
return prefix + args + suffix
return ExpandCommand
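# Example (illustrative): with --special-command "python -u run-valgrind.py @",
# the prefix is everything before the '@' and the suffix everything after it,
# so a test command ['node', 'test.js'] expands to
# ['python', '-u', 'run-valgrind.py', 'node', 'test.js'].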
BUILT_IN_TESTS = ['simple', 'pummel', 'message', 'internet', 'gc']
def GetSuites(test_root):
def IsSuite(path):
return isdir(path) and exists(join(path, 'testcfg.py'))
return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ]
def FormatTime(d):
millis = round(d * 1000) % 1000
return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
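# Example: FormatTime(83.456) returns "01:23.456" (minutes:seconds.milliseconds).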
def Main():
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
#workspace = abspath(join(dirname(sys.argv[0]), '..'))
workspace = abspath(join(dirname(sys.argv[0]), '.'))
suites = GetSuites(join(workspace, 'test'))
repositories = [TestRepository(join(workspace, 'test', name)) for name in suites]
repositories += [TestRepository(a) for a in options.suite]
root = LiteralTestSuite(repositories)
if len(args) == 0:
paths = [SplitPath(t) for t in BUILT_IN_TESTS]
else:
paths = [ ]
for arg in args:
path = SplitPath(arg)
paths.append(path)
# Check for --valgrind option. If enabled, we overwrite the special
# command flag with a command that uses the run-valgrind.py script.
if options.valgrind:
run_valgrind = join(workspace, "tools", "run-valgrind.py")
options.special_command = "python -u " + run_valgrind + " @"
shell = abspath(options.shell)
buildspace = dirname(shell)
processor = GetSpecialCommandProcessor(options.special_command)
if options.use_http1:
def wrap(processor):
return lambda args: processor(args[:1] + ['--use-http1'] + args[1:])
processor = wrap(processor)
context = Context(workspace,
buildspace,
VERBOSE,
shell,
options.timeout,
processor,
options.suppress_dialogs,
options.store_unexpected_output)
# First build the required targets
if not options.no_build:
reqs = [ ]
for path in paths:
reqs += root.GetBuildRequirements(path, context)
reqs = list(set(reqs))
if len(reqs) > 0:
if options.j != 1:
options.scons_flags += ['-j', str(options.j)]
if not BuildRequirements(context, reqs, options.mode, options.scons_flags):
return 1
# Just return if we are only building the targets for running the tests.
if options.build_only:
return 0
# Get status for tests
sections = [ ]
defs = { }
root.GetTestStatus(context, sections, defs)
config = Configuration(sections, defs)
# List the tests
all_cases = [ ]
all_unused = [ ]
unclassified_tests = [ ]
globally_unused_rules = None
for path in paths:
for mode in options.mode:
if not exists(context.GetVm(mode)):
print "Can't find shell executable: '%s'" % context.GetVm(mode)
continue
env = {
'mode': mode,
'system': utils.GuessOS(),
'arch': options.arch,
'simulator': options.simulator
}
test_list = root.ListTests([], path, context, mode)
unclassified_tests += test_list
(cases, unused_rules, all_outcomes) = config.ClassifyTests(test_list, env)
if globally_unused_rules is None:
globally_unused_rules = set(unused_rules)
else:
globally_unused_rules = globally_unused_rules.intersection(unused_rules)
all_cases += cases
all_unused.append(unused_rules)
if options.cat:
visited = set()
for test in unclassified_tests:
key = tuple(test.path)
if key in visited:
continue
visited.add(key)
print "--- begin source: %s ---" % test.GetLabel()
source = test.GetSource().strip()
print source
print "--- end source: %s ---" % test.GetLabel()
return 0
if options.warn_unused:
for rule in globally_unused_rules:
print "Rule for '%s' was not used." % '/'.join([str(s) for s in rule.path])
if options.report:
PrintReport(all_cases)
result = None
def DoSkip(case):
return SKIP in case.outcomes or SLOW in case.outcomes
cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
if len(cases_to_run) == 0:
print "No tests to run."
return 0
else:
try:
start = time.time()
if RunTestCases(cases_to_run, options.progress, options.j):
result = 0
else:
result = 1
duration = time.time() - start
except KeyboardInterrupt:
print "Interrupted"
return 1
if options.time:
# Write the times to stderr to make it easy to separate from the
# test output.
print
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
timed_tests = [ t.case for t in cases_to_run if t.case.duration is not None ]
timed_tests.sort(lambda a, b: a.CompareTime(b))
index = 1
for entry in timed_tests[:20]:
t = FormatTime(entry.duration)
sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
index += 1
return result
if __name__ == '__main__':
sys.exit(Main())
|
asyncclientproxy.py
|
'''
@author: Deniz Altinbuken, Emin Gun Sirer
@note: ConCoord Client Proxy
@copyright: See LICENSE
'''
import socket, os, sys, time, random, threading, select
from threading import Thread, Condition, RLock, Lock
import pickle
from concoord.pack import *
from concoord.enums import *
from concoord.utils import *
from concoord.exception import *
from concoord.connection import ConnectionPool, Connection
from concoord.message import *
from concoord.pvalue import PValueSet
try:
import dns
import dns.resolver
import dns.exception
except ImportError:
print("Install dnspython: http://www.dnspython.org/")
class ReqDesc:
def __init__(self, clientproxy, args, token):
# acquire a unique command number
self.commandnumber = clientproxy.commandnumber
clientproxy.commandnumber += 1
self.cm = create_message(MSG_CLIENTREQUEST, clientproxy.me,
{FLD_PROPOSAL: Proposal(clientproxy.me, self.commandnumber, args),
FLD_TOKEN: token,
FLD_CLIENTBATCH: False,
FLD_SENDCOUNT: 0})
self.reply = None
self.replyarrived = False
self.replyarrivedcond = Condition()
self.sendcount = 0
def __str__(self):
return "Request Descriptor for cmd %d\nMessage %s\nReply %s" % (self.commandnumber, str(self.cm), self.reply)
class ClientProxy():
def __init__(self, bootstrap, timeout=60, debug=False, token=None):
self.debug = debug
self.timeout = timeout
self.domainname = None
self.token = token
self.socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
self.writelock = Lock()
self.bootstraplist = self.discoverbootstrap(bootstrap)
if len(self.bootstraplist) == 0:
raise ConnectionError("No bootstrap found")
if not self.connecttobootstrap():
raise ConnectionError("Cannot connect to any bootstrap")
myaddr = findOwnIP()
myport = self.socket.getsockname()[1]
self.me = Peer(myaddr,myport,NODE_CLIENT)
self.commandnumber = random.randint(1, sys.maxint)
# synchronization
self.lock = Lock()
self.pendingops = {}
self.needreconfig = False
self.outstanding = []
# spawn thread, invoke recv_loop
recv_thread = Thread(target=self.recv_loop, name='ReceiveThread')
recv_thread.start()
def _getipportpairs(self, bootaddr, bootport):
for node in socket.getaddrinfo(bootaddr, bootport, socket.AF_INET, socket.SOCK_STREAM):
yield (node[4][0],bootport)
def getbootstrapfromdomain(self, domainname):
tmpbootstraplist = []
try:
answers = dns.resolver.query('_concoord._tcp.'+domainname, 'SRV')
for rdata in answers:
for peer in self._getipportpairs(str(rdata.target), rdata.port):
if peer not in tmpbootstraplist:
tmpbootstraplist.append(peer)
except (dns.resolver.NXDOMAIN, dns.exception.Timeout):
if self.debug:
print "Cannot resolve name"
return tmpbootstraplist
def discoverbootstrap(self, givenbootstrap):
tmpbootstraplist = []
try:
for bootstrap in givenbootstrap.split(","):
bootstrap = bootstrap.strip()
# The bootstrap list is read only during initialization
if bootstrap.find(":") >= 0:
bootaddr,bootport = bootstrap.split(":")
for peer in self._getipportpairs(bootaddr, int(bootport)):
if peer not in tmpbootstraplist:
tmpbootstraplist.append(peer)
else:
self.domainname = bootstrap
tmpbootstraplist = self.getbootstrapfromdomain(self.domainname)
except ValueError:
if self.debug:
print "bootstrap usage: ipaddr1:port1,ipaddr2:port2 or domainname"
self._graceexit()
return tmpbootstraplist
def connecttobootstrap(self):
connected = False
for boottuple in self.bootstraplist:
try:
self.socket.close()
self.socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
self.socket.connect(boottuple)
self.conn = Connection(self.socket)
self.conn.settimeout(CLIENTRESENDTIMEOUT)
self.bootstrap = boottuple
connected = True
if self.debug:
print "Connected to new bootstrap: ", boottuple
break
except socket.error, e:
if self.debug:
print e
continue
return connected
def trynewbootstrap(self):
if self.domainname:
self.bootstraplist = self.getbootstrapfromdomain(self.domainname)
else:
oldbootstrap = self.bootstraplist.pop(0)
self.bootstraplist.append(oldbootstrap)
return self.connecttobootstrap()
def invoke_command_async(self, *args):
# create a request descriptor
reqdesc = ReqDesc(self, args, self.token)
# send the clientrequest
with self.writelock:
success = self.conn.send(reqdesc.cm)
self.pendingops[reqdesc.commandnumber] = reqdesc
# if the message is not sent, we should reconfigure
# and send it without making the client wait
if not success:
self.outstanding.append(reqdesc)
self.needreconfig = not success
return reqdesc
def wait_until_command_done(self, reqdesc):
with reqdesc.replyarrivedcond:
while not reqdesc.replyarrived:
reqdesc.replyarrivedcond.wait()
if reqdesc.reply.replycode == CR_OK:
return reqdesc.reply.reply
elif reqdesc.reply.replycode == CR_EXCEPTION:
raise Exception(reqdesc.reply.reply)
else:
return "Unexpected Client Reply Code: %d" % reqdesc.reply.replycode
def recv_loop(self, *args):
while True:
try:
for reply in self.conn.received_bytes():
if reply and reply.type == MSG_CLIENTREPLY:
# received a reply
reqdesc = self.pendingops[reply.inresponseto]
# Async Clientproxy doesn't support BLOCK and UNBLOCK
if reply.replycode == CR_OK or reply.replycode == CR_EXCEPTION:
# the request is done
reqdesc.reply = reply
with reqdesc.replyarrivedcond:
reqdesc.replyarrived = True
reqdesc.replyarrivedcond.notify()
del self.pendingops[reply.inresponseto]
elif reply.replycode == CR_INPROGRESS:
# the request is not done yet
pass
elif reply.replycode == CR_REJECTED or reply.replycode == CR_LEADERNOTREADY:
# the request should be resent after reconfiguration
with self.lock:
self.outstanding.append(reqdesc)
self.needreconfig = True
else:
print "Unknown Client Reply Code"
except ConnectionError:
self.needreconfig = True
except KeyboardInterrupt:
self._graceexit()
with self.lock:
if self.needreconfig:
if not self.trynewbootstrap():
raise ConnectionError("Cannot connect to any bootstrap")
with self.writelock:
# iterate over a copy: successfully resent requests are removed from the list
for reqdesc in self.outstanding[:]:
success = self.conn.send(reqdesc.cm)
if success:
self.outstanding.remove(reqdesc)
def _graceexit(self):
os._exit(0)
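# Hedged usage sketch of the proxy above; the bootstrap address and the command
# name are hypothetical and depend on the deployed ConCoord object:
#   proxy = ClientProxy("127.0.0.1:14000")
#   reqdesc = proxy.invoke_command_async("put", "key", "value")
#   ...  # do other work while the command is replicated
#   result = proxy.wait_until_command_done(reqdesc)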
|
test_recreation.py
|
"""InVEST Recreation model tests."""
import datetime
import glob
import zipfile
import socket
import threading
import unittest
import tempfile
import shutil
import os
import functools
import logging
import json
import queue
import Pyro4
import pygeoprocessing
import pygeoprocessing.testing
import numpy
import pandas
from osgeo import gdal
import taskgraph
from natcap.invest import utils
Pyro4.config.SERIALIZER = 'marshal' # allow null bytes in strings
REGRESSION_DATA = os.path.join(
os.path.dirname(__file__), '..', 'data', 'invest-test-data',
'recreation')
SAMPLE_DATA = os.path.join(REGRESSION_DATA, 'input')
LOGGER = logging.getLogger('test_recreation')
def _timeout(max_timeout):
"""Timeout decorator, parameter in seconds."""
def timeout_decorator(target):
"""Wrap the original function."""
work_queue = queue.Queue()
result_queue = queue.Queue()
def worker():
"""Read one func,args,kwargs tuple and execute."""
try:
func, args, kwargs = work_queue.get()
result = func(*args, **kwargs)
result_queue.put(result)
except Exception as e:
result_queue.put(e)
raise
work_thread = threading.Thread(target=worker)
work_thread.daemon = True
work_thread.start()
@functools.wraps(target)
def func_wrapper(*args, **kwargs):
"""Closure for function."""
try:
work_queue.put((target, args, kwargs))
result = result_queue.get(timeout=max_timeout)
if isinstance(result, Exception):
raise result
return result
except queue.Empty:
raise RuntimeError("Timeout of %f exceeded" % max_timeout)
return func_wrapper
return timeout_decorator
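# Note on _timeout (added comment): a single daemon worker thread is started
# when a function is decorated and it services one queued call; func_wrapper
# raises RuntimeError if no result arrives within max_timeout seconds.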
def _make_empty_files(base_file_list):
"""Create a list of empty files.
Parameters:
base_file_list: a list of paths to empty files to be created.
Returns:
None.
"""
for file_path in base_file_list:
with open(file_path, 'w') as open_file:
open_file.write('')
def _resample_csv(base_csv_path, base_dst_path, resample_factor):
"""Resample (downsize) a csv file by a certain resample factor.
Parameters:
base_csv_path (str): path to the source csv file to be resampled.
base_dst_path (str): path to the destination csv file.
resample_factor (int): the factor used to determined how many rows
should be skipped before writing a row to the destination file.
Returns:
None
"""
with open(base_csv_path, 'r') as read_table:
with open(base_dst_path, 'w') as write_table:
for i, line in enumerate(read_table):
if i % resample_factor == 0:
write_table.write(line)
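# Example: resample_factor=10 keeps lines 0, 10, 20, ... of the source csv,
# shrinking the point table roughly tenfold for faster tests.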
class TestBufferedNumpyDiskMap(unittest.TestCase):
"""Tests for BufferedNumpyDiskMap."""
def setUp(self):
"""Setup workspace."""
self.workspace_dir = tempfile.mkdtemp()
def tearDown(self):
"""Delete workspace."""
shutil.rmtree(self.workspace_dir)
def test_basic_operation(self):
"""Recreation test buffered file manager basic ops w/ no buffer."""
from natcap.invest.recreation import buffered_numpy_disk_map
file_manager = buffered_numpy_disk_map.BufferedNumpyDiskMap(
os.path.join(self.workspace_dir, 'test'), 0)
file_manager.append(1234, numpy.array([1, 2, 3, 4]))
file_manager.append(1234, numpy.array([1, 2, 3, 4]))
file_manager.append(4321, numpy.array([-4, -1, -2, 4]))
numpy.testing.assert_equal(
file_manager.read(1234), numpy.array([1, 2, 3, 4, 1, 2, 3, 4]))
numpy.testing.assert_equal(
file_manager.read(4321), numpy.array([-4, -1, -2, 4]))
file_manager.delete(1234)
with self.assertRaises(IOError):
file_manager.read(1234)
class TestRecServer(unittest.TestCase):
"""Tests that set up local rec server on a port and call through."""
def setUp(self):
"""Setup workspace."""
self.workspace_dir = tempfile.mkdtemp()
self.resampled_data_path = os.path.join(
self.workspace_dir, 'resampled_data.csv')
_resample_csv(
os.path.join(SAMPLE_DATA, 'sample_data.csv'),
self.resampled_data_path, resample_factor=10)
def tearDown(self):
"""Delete workspace."""
shutil.rmtree(self.workspace_dir, ignore_errors=True)
def test_hashfile(self):
"""Recreation test for hash of file."""
from natcap.invest.recreation import recmodel_server
file_hash = recmodel_server._hashfile(
self.resampled_data_path, blocksize=2**20, fast_hash=False)
# The exact encoded string that is hashed is dependent on python version,
# with Python 3 including b prefix and \n suffix.
# these hashes are for [py2.7, py3.6]
self.assertIn(file_hash, ['c052e7a0a4c5e528', 'c8054b109d7a9d2a'])
def test_hashfile_fast(self):
"""Recreation test for hash and fast hash of file."""
from natcap.invest.recreation import recmodel_server
file_hash = recmodel_server._hashfile(
self.resampled_data_path, blocksize=2**20, fast_hash=True)
# we can't assert the full hash since it is dependent on the file
# last access time and we can't reliably set that in Python.
# instead we just check that at the very least it ends with _fast_hash
self.assertTrue(file_hash.endswith('_fast_hash'))
def test_year_order(self):
"""Recreation ensure that end year < start year raise ValueError."""
from natcap.invest.recreation import recmodel_server
with self.assertRaises(ValueError):
# intentionally construct start year > end year
recmodel_server.RecModel(
self.resampled_data_path,
2014, 2005, os.path.join(self.workspace_dir, 'server_cache'))
@_timeout(30.0)
def test_workspace_fetcher(self):
"""Recreation test workspace fetcher on a local Pyro4 empty server."""
from natcap.invest.recreation import recmodel_server
from natcap.invest.recreation import recmodel_workspace_fetcher
# Attempt to launch the server a few times; this test has been flaky when
# run as part of the entire suite, which we suspect is due to a race
# condition when grabbing a free port.
server_launched = False
for _ in range(3):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', 0))
port = sock.getsockname()[1]
sock.close()
sock = None
server_args = {
'hostname': 'localhost',
'port': port,
'raw_csv_point_data_path': self.resampled_data_path,
'cache_workspace': self.workspace_dir,
'min_year': 2010,
'max_year': 2015,
}
server_thread = threading.Thread(
target=recmodel_server.execute, args=(server_args,))
server_thread.daemon = True
server_thread.start()
server_launched = True
break
except Exception:
LOGGER.warning("Can't start server process on port %d", port)
if not server_launched:
self.fail("Server didn't start")
path = "PYRO:natcap.invest.recreation@localhost:%s" % port
LOGGER.info("Local server path %s", path)
recreation_server = Pyro4.Proxy(path)
aoi_path = os.path.join(
SAMPLE_DATA, 'test_aoi_for_subset.shp')
basename = os.path.splitext(aoi_path)[0]
aoi_archive_path = os.path.join(
self.workspace_dir, 'aoi_zipped.zip')
with zipfile.ZipFile(aoi_archive_path, 'w') as myzip:
for filename in glob.glob(basename + '.*'):
myzip.write(filename, os.path.basename(filename))
# convert shapefile to binary string for serialization
zip_file_binary = open(aoi_archive_path, 'rb').read()
date_range = (('2005-01-01'), ('2014-12-31'))
out_vector_filename = 'test_aoi_for_subset_pud.shp'
_, workspace_id = (
recreation_server.calc_photo_user_days_in_aoi(
zip_file_binary, date_range, out_vector_filename))
fetcher_args = {
'workspace_dir': self.workspace_dir,
'hostname': 'localhost',
'port': port,
'workspace_id': workspace_id,
}
try:
recmodel_workspace_fetcher.execute(fetcher_args)
except:
LOGGER.error(
"Server process failed (%s) is_alive=%s",
str(server_thread), server_thread.is_alive())
raise
out_workspace_dir = os.path.join(
self.workspace_dir, 'workspace_zip')
os.makedirs(out_workspace_dir)
workspace_zip_path = os.path.join(
self.workspace_dir, workspace_id + '.zip')
zipfile.ZipFile(workspace_zip_path, 'r').extractall(
out_workspace_dir)
pygeoprocessing.testing.assert_vectors_equal(
aoi_path,
os.path.join(out_workspace_dir, 'test_aoi_for_subset.shp'), 1E-6)
@_timeout(30.0)
def test_empty_server(self):
"""Recreation test a client call to simple server."""
from natcap.invest.recreation import recmodel_server
from natcap.invest.recreation import recmodel_client
empty_point_data_path = os.path.join(
self.workspace_dir, 'empty_table.csv')
open(empty_point_data_path, 'w').close() # touch the file
# attempt to get an open port; could result in race condition but
# will be okay for a test. if this test ever fails because of port
# in use, that's probably why
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', 0))
port = sock.getsockname()[1]
sock.close()
sock = None
server_args = {
'hostname': 'localhost',
'port': port,
'raw_csv_point_data_path': empty_point_data_path,
'cache_workspace': self.workspace_dir,
'min_year': 2004,
'max_year': 2015,
}
server_thread = threading.Thread(
target=recmodel_server.execute, args=(server_args,))
server_thread.daemon = True
server_thread.start()
client_args = {
'aoi_path': os.path.join(
SAMPLE_DATA, 'test_aoi_for_subset.shp'),
'cell_size': 7000.0,
'hostname': 'localhost',
'port': port,
'compute_regression': False,
'start_year': '2005',
'end_year': '2014',
'grid_aoi': False,
'results_suffix': '',
'workspace_dir': self.workspace_dir,
}
recmodel_client.execute(client_args)
# testing for file existence seems reasonable since mostly we are
# testing that a local server starts and a client connects to it
_test_same_files(
os.path.join(REGRESSION_DATA, 'file_list_empty_local_server.txt'),
self.workspace_dir)
def test_local_aggregate_points(self):
"""Recreation test single threaded local AOI aggregate calculation."""
from natcap.invest.recreation import recmodel_server
recreation_server = recmodel_server.RecModel(
self.resampled_data_path, 2005, 2014,
os.path.join(self.workspace_dir, 'server_cache'))
aoi_path = os.path.join(SAMPLE_DATA, 'test_aoi_for_subset.shp')
basename = os.path.splitext(aoi_path)[0]
aoi_archive_path = os.path.join(
self.workspace_dir, 'aoi_zipped.zip')
with zipfile.ZipFile(aoi_archive_path, 'w') as myzip:
for filename in glob.glob(basename + '.*'):
myzip.write(filename, os.path.basename(filename))
# convert shapefile to binary string for serialization
zip_file_binary = open(aoi_archive_path, 'rb').read()
# transfer zipped file to server
date_range = (('2005-01-01'), ('2014-12-31'))
out_vector_filename = 'test_aoi_for_subset_pud.shp'
zip_result, workspace_id = (
recreation_server.calc_photo_user_days_in_aoi(
zip_file_binary, date_range, out_vector_filename))
# unpack result
result_zip_path = os.path.join(self.workspace_dir, 'pud_result.zip')
open(result_zip_path, 'wb').write(zip_result)
zipfile.ZipFile(result_zip_path, 'r').extractall(self.workspace_dir)
result_vector_path = os.path.join(
self.workspace_dir, out_vector_filename)
expected_vector_path = os.path.join(
REGRESSION_DATA, 'test_aoi_for_subset_pud.shp')
pygeoprocessing.testing.assert_vectors_equal(
expected_vector_path, result_vector_path, 1E-6)
# ensure the remote workspace is as expected
workspace_zip_binary = recreation_server.fetch_workspace_aoi(
workspace_id)
out_workspace_dir = os.path.join(self.workspace_dir, 'workspace_zip')
os.makedirs(out_workspace_dir)
workspace_zip_path = os.path.join(out_workspace_dir, 'workspace.zip')
open(workspace_zip_path, 'wb').write(workspace_zip_binary)
zipfile.ZipFile(workspace_zip_path, 'r').extractall(out_workspace_dir)
pygeoprocessing.testing.assert_vectors_equal(
aoi_path,
os.path.join(out_workspace_dir, 'test_aoi_for_subset.shp'), 1E-6)
def test_local_calc_poly_pud(self):
"""Recreation test single threaded local PUD calculation."""
from natcap.invest.recreation import recmodel_server
recreation_server = recmodel_server.RecModel(
self.resampled_data_path,
2005, 2014, os.path.join(self.workspace_dir, 'server_cache'))
date_range = (
numpy.datetime64('2005-01-01'),
numpy.datetime64('2014-12-31'))
poly_test_queue = queue.Queue()
poly_test_queue.put(0)
poly_test_queue.put('STOP')
pud_poly_feature_queue = queue.Queue()
recmodel_server._calc_poly_pud(
recreation_server.qt_pickle_filename,
os.path.join(SAMPLE_DATA, 'test_aoi_for_subset.shp'),
date_range, poly_test_queue, pud_poly_feature_queue)
# assert annual average PUD is the same as regression
self.assertEqual(
83.2, pud_poly_feature_queue.get()[1][0])
def test_local_calc_existing_cached(self):
"""Recreation local PUD calculation on existing quadtree."""
from natcap.invest.recreation import recmodel_server
recreation_server = recmodel_server.RecModel(
self.resampled_data_path,
2005, 2014, os.path.join(self.workspace_dir, 'server_cache'))
recreation_server = None
# This will not generate a new quadtree but instead load existing one
recreation_server = recmodel_server.RecModel(
self.resampled_data_path,
2005, 2014, os.path.join(self.workspace_dir, 'server_cache'))
date_range = (
numpy.datetime64('2005-01-01'),
numpy.datetime64('2014-12-31'))
poly_test_queue = queue.Queue()
poly_test_queue.put(0)
poly_test_queue.put('STOP')
pud_poly_feature_queue = queue.Queue()
recmodel_server._calc_poly_pud(
recreation_server.qt_pickle_filename,
os.path.join(SAMPLE_DATA, 'test_aoi_for_subset.shp'),
date_range, poly_test_queue, pud_poly_feature_queue)
# assert annual average PUD is the same as regression
self.assertEqual(
83.2, pud_poly_feature_queue.get()[1][0])
def test_parse_input_csv(self):
"""Recreation test parsing raw CSV."""
from natcap.invest.recreation import recmodel_server
block_offset_size_queue = queue.Queue()
block_offset_size_queue.put((0, 2**10))
block_offset_size_queue.put('STOP')
numpy_array_queue = queue.Queue()
recmodel_server._parse_input_csv(
block_offset_size_queue, self.resampled_data_path,
numpy_array_queue)
val = numpy_array_queue.get()
# we know what the first date is
self.assertEqual(val[0][0], datetime.date(2013, 3, 16))
@_timeout(30.0)
def test_regression_local_server(self):
"""Recreation base regression test on sample data on local server.
Executes Recreation model all the way through scenario prediction.
With this Florida AOI, raster and vector predictors do not
intersect the AOI. This makes for a fast test and incidentally
covers an edge case.
"""
from natcap.invest.recreation import recmodel_client
from natcap.invest.recreation import recmodel_server
# attempt to get an open port; could result in race condition but
# will be okay for a test. if this test ever fails because of port
# in use, that's probably why
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', 0))
port = sock.getsockname()[1]
sock.close()
sock = None
server_args = {
'hostname': 'localhost',
'port': port,
'raw_csv_point_data_path': self.resampled_data_path,
'cache_workspace': self.workspace_dir,
'min_year': 2004,
'max_year': 2015,
'max_points_per_node': 200,
}
server_thread = threading.Thread(
target=recmodel_server.execute, args=(server_args,))
server_thread.daemon = True
server_thread.start()
args = {
'aoi_path': os.path.join(
SAMPLE_DATA, 'local_recreation_aoi_florida_utm18n.shp'),
'cell_size': 40000.0,
'compute_regression': True,
'start_year': '2005',
'end_year': '2014',
'hostname': 'localhost',
'port': port,
'grid_aoi': True,
'grid_type': 'hexagon',
'predictor_table_path': os.path.join(
SAMPLE_DATA, 'predictors.csv'),
'results_suffix': '',
'scenario_predictor_table_path': os.path.join(
SAMPLE_DATA, 'predictors_scenario.csv'),
'workspace_dir': self.workspace_dir,
}
recmodel_client.execute(args)
_assert_regression_results_eq(
args['workspace_dir'],
os.path.join(REGRESSION_DATA, 'file_list_base_florida_aoi.txt'),
os.path.join(args['workspace_dir'], 'scenario_results.shp'),
os.path.join(REGRESSION_DATA, 'local_server_scenario_results.csv'))
def test_all_metrics_local_server(self):
"""Recreation test with all but trivial predictor metrics.
Executes Recreation model all the way through scenario prediction.
With this 'extra_fields_features' AOI, we also cover two edge cases:
1) the AOI has a pre-existing field that the model wishes to create.
2) the AOI has features only covering nodata raster predictor values."""
from natcap.invest.recreation import recmodel_client
from natcap.invest.recreation import recmodel_server
# attempt to get an open port; could result in race condition but
# will be okay for a test. if this test ever fails because of port
# in use, that's probably why
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', 0))
port = sock.getsockname()[1]
sock.close()
sock = None
server_args = {
'hostname': 'localhost',
'port': port,
'raw_csv_point_data_path': self.resampled_data_path,
'cache_workspace': self.workspace_dir,
'min_year': 2008,
'max_year': 2015,
'max_points_per_node': 200,
}
server_thread = threading.Thread(
target=recmodel_server.execute, args=(server_args,))
server_thread.daemon = True
server_thread.start()
args = {
'aoi_path': os.path.join(
SAMPLE_DATA, 'andros_aoi_with_extra_fields_features.shp'),
'compute_regression': True,
'start_year': '2008',
'end_year': '2014',
'grid_aoi': False,
'predictor_table_path': os.path.join(
SAMPLE_DATA, 'predictors_all.csv'),
'scenario_predictor_table_path': os.path.join(
SAMPLE_DATA, 'predictors_all.csv'),
'results_suffix': '',
'workspace_dir': self.workspace_dir,
'hostname': server_args['hostname'],
'port': server_args['port'],
}
recmodel_client.execute(args)
out_grid_vector_path = os.path.join(
args['workspace_dir'], 'predictor_data.shp')
expected_grid_vector_path = os.path.join(
REGRESSION_DATA, 'predictor_data_all_metrics.shp')
_assert_vector_attributes_eq(
out_grid_vector_path, expected_grid_vector_path, 3)
out_scenario_path = os.path.join(
args['workspace_dir'], 'scenario_results.shp')
expected_scenario_path = os.path.join(
REGRESSION_DATA, 'scenario_results_all_metrics.shp')
_assert_vector_attributes_eq(
out_scenario_path, expected_scenario_path, 3)
def test_results_suffix_on_serverside_files(self):
"""Recreation test suffix gets added to files created on server."""
from natcap.invest.recreation import recmodel_client
from natcap.invest.recreation import recmodel_server
# attempt to get an open port; could result in race condition but
# will be okay for a test. if this test ever fails because of port
# in use, that's probably why
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', 0))
port = sock.getsockname()[1]
sock.close()
sock = None
server_args = {
'hostname': 'localhost',
'port': port,
'raw_csv_point_data_path': self.resampled_data_path,
'cache_workspace': self.workspace_dir,
'min_year': 2014,
'max_year': 2015,
'max_points_per_node': 200,
}
server_thread = threading.Thread(
target=recmodel_server.execute, args=(server_args,))
server_thread.daemon = True
server_thread.start()
args = {
'aoi_path': os.path.join(
SAMPLE_DATA, 'andros_aoi_with_extra_fields_features.shp'),
'compute_regression': False,
'start_year': '2014',
'end_year': '2015',
'grid_aoi': False,
'results_suffix': 'hello',
'workspace_dir': self.workspace_dir,
'hostname': server_args['hostname'],
'port': server_args['port'],
}
recmodel_client.execute(args)
self.assertTrue(os.path.exists(
os.path.join(args['workspace_dir'], 'monthly_table_hello.csv')))
self.assertTrue(os.path.exists(
os.path.join(args['workspace_dir'], 'pud_results_hello.shp')))
class TestLocalRecServer(unittest.TestCase):
"""Tests using a local rec server."""
def setUp(self):
"""Setup workspace and server."""
from natcap.invest.recreation import recmodel_server
self.workspace_dir = tempfile.mkdtemp()
self.recreation_server = recmodel_server.RecModel(
os.path.join(SAMPLE_DATA, 'sample_data.csv'),
2005, 2014, os.path.join(self.workspace_dir, 'server_cache'))
def tearDown(self):
"""Delete workspace."""
shutil.rmtree(self.workspace_dir)
def test_local_aoi(self):
"""Recreation test local AOI with local server."""
aoi_path = os.path.join(SAMPLE_DATA, 'test_local_aoi_for_subset.shp')
date_range = (
numpy.datetime64('2010-01-01'),
numpy.datetime64('2014-12-31'))
out_vector_filename = os.path.join(self.workspace_dir, 'pud.shp')
self.recreation_server._calc_aggregated_points_in_aoi(
aoi_path, self.workspace_dir, date_range, out_vector_filename)
output_lines = open(os.path.join(
self.workspace_dir, 'monthly_table.csv'), 'r').readlines()
expected_lines = open(os.path.join(
REGRESSION_DATA, 'expected_monthly_table_for_subset.csv'),
'r').readlines()
if output_lines != expected_lines:
raise ValueError(
"Output table not the same as input.\n"
"Expected:\n%s\nGot:\n%s" % (expected_lines, output_lines))
class RecreationRegressionTests(unittest.TestCase):
"""Regression tests for InVEST Recreation model."""
def setUp(self):
"""Setup workspace directory."""
# this lets us delete the workspace after it's done, no matter the
# test result
self.workspace_dir = tempfile.mkdtemp()
def tearDown(self):
"""Delete workspace."""
shutil.rmtree(self.workspace_dir)
def test_data_missing_in_predictors(self):
"""Recreation raise exception if predictor data missing."""
from natcap.invest.recreation import recmodel_client
response_vector_path = os.path.join(SAMPLE_DATA, 'andros_aoi.shp')
table_path = os.path.join(
SAMPLE_DATA, 'predictors_data_missing.csv')
with self.assertRaises(ValueError):
recmodel_client._validate_same_projection(
response_vector_path, table_path)
def test_data_different_projection(self):
"""Recreation raise exception if data in different projection."""
from natcap.invest.recreation import recmodel_client
response_vector_path = os.path.join(SAMPLE_DATA, 'andros_aoi.shp')
table_path = os.path.join(
SAMPLE_DATA, 'predictors_wrong_projection.csv')
with self.assertRaises(ValueError):
recmodel_client._validate_same_projection(
response_vector_path, table_path)
def test_different_tables(self):
"""Recreation exception if scenario ids different than predictor."""
from natcap.invest.recreation import recmodel_client
base_table_path = os.path.join(
SAMPLE_DATA, 'predictors_data_missing.csv')
scenario_table_path = os.path.join(
SAMPLE_DATA, 'predictors_wrong_projection.csv')
with self.assertRaises(ValueError):
recmodel_client._validate_same_ids_and_types(
base_table_path, scenario_table_path)
def test_delay_op(self):
"""Recreation coverage of delay op function."""
from natcap.invest.recreation import recmodel_client
# not much to test here beyond that the function is invoked; a negative
# last_time guarantees the delay period has already elapsed
last_time = -1.0
time_delay = 1.0
called = [False]
def func():
"""Set `called` to True."""
called[0] = True
recmodel_client.delay_op(last_time, time_delay, func)
self.assertTrue(called[0])
def test_raster_sum_mean_no_nodata(self):
"""Recreation test sum/mean if raster doesn't have nodata defined."""
from natcap.invest.recreation import recmodel_client
# The following raster has no nodata value
raster_path = os.path.join(SAMPLE_DATA, 'no_nodata_raster.tif')
response_vector_path = os.path.join(SAMPLE_DATA, 'andros_aoi.shp')
target_path = os.path.join(self.workspace_dir, "predictor.json")
recmodel_client._raster_sum_mean(
raster_path, "mean", response_vector_path, target_path)
with open(target_path, 'r') as target_file:
predictor_results = json.load(target_file)
# These constants were calculated by hand by Dave.
numpy.testing.assert_allclose(
predictor_results['0'], 13.0, rtol=0, atol=1e-6)
def test_raster_sum_mean_nodata(self):
"""Recreation test sum/mean if raster has no valid pixels.
This may be a raster that does not intersect with the AOI, or
one that does intersect, but is entirely nodata within the AOI.
Such a raster is not usable as a predictor variable.
"""
from natcap.invest.recreation import recmodel_client
# The following raster has only nodata pixels.
raster_path = os.path.join(SAMPLE_DATA, 'nodata_raster.tif')
response_vector_path = os.path.join(SAMPLE_DATA, 'andros_aoi.shp')
target_path = os.path.join(self.workspace_dir, "predictor.json")
recmodel_client._raster_sum_mean(
raster_path, "sum", response_vector_path, target_path)
with open(target_path, 'r') as target_file:
predictor_results = json.load(target_file)
# Assert that the target file was written and that it is an empty dictionary
self.assertEqual(len(predictor_results), 0)
def test_least_squares_regression(self):
"""Recreation regression test for the least-squares linear model."""
from natcap.invest.recreation import recmodel_client
coefficient_vector_path = os.path.join(
REGRESSION_DATA, 'predictor_data.shp')
response_vector_path = os.path.join(
REGRESSION_DATA, 'predictor_data_pud.shp')
response_id = 'PUD_YR_AVG'
_, coefficients, ssres, r_sq, r_sq_adj, std_err, dof, se_est = (
recmodel_client._build_regression(
response_vector_path, coefficient_vector_path, response_id))
results = {}
results['coefficients'] = coefficients
results['ssres'] = ssres
results['r_sq'] = r_sq
results['r_sq_adj'] = r_sq_adj
results['std_err'] = std_err
results['dof'] = dof
results['se_est'] = se_est
# Dave created these numbers using Recreation model release/3.5.0
expected_results = {}
expected_results['coefficients'] = [
-3.67484238e-03, -8.76864968e-06, 1.75244536e-01, 2.07040116e-01,
6.59076098e-01]
expected_results['ssres'] = 11.03734250869611
expected_results['r_sq'] = 0.5768926587089602
expected_results['r_sq_adj'] = 0.5256069203706524
expected_results['std_err'] = 0.5783294255923199
expected_results['dof'] = 33
expected_results['se_est'] = [
5.93275522e-03, 8.49251058e-06, 1.72921342e-01, 6.39079593e-02,
3.98165865e-01]
for key in expected_results:
numpy.testing.assert_allclose(results[key], expected_results[key])
@unittest.skip("skipping to avoid remote server call (issue #3753)")
def test_base_regression(self):
"""Recreation base regression test on fast sample data.
Executes Recreation model with default data and default arguments.
"""
from natcap.invest.recreation import recmodel_client
args = {
'aoi_path': os.path.join(SAMPLE_DATA, 'andros_aoi.shp'),
'cell_size': 40000.0,
'compute_regression': True,
'start_year': '2005',
'end_year': '2014',
'grid_aoi': True,
'grid_type': 'hexagon',
'predictor_table_path': os.path.join(
SAMPLE_DATA, 'predictors.csv'),
'results_suffix': '',
'scenario_predictor_table_path': os.path.join(
SAMPLE_DATA, 'predictors_scenario.csv'),
'workspace_dir': self.workspace_dir,
}
recmodel_client.execute(args)
_assert_regression_results_eq(
args['workspace_dir'],
os.path.join(REGRESSION_DATA, 'file_list_base.txt'),
os.path.join(args['workspace_dir'], 'scenario_results.shp'),
os.path.join(REGRESSION_DATA, 'scenario_results_40000.csv'))
def test_square_grid_regression(self):
"""Recreation square grid regression test."""
from natcap.invest.recreation import recmodel_client
out_grid_vector_path = os.path.join(
self.workspace_dir, 'square_grid_vector_path.shp')
recmodel_client._grid_vector(
os.path.join(SAMPLE_DATA, 'andros_aoi.shp'), 'square', 20000.0,
out_grid_vector_path)
expected_grid_vector_path = os.path.join(
REGRESSION_DATA, 'square_grid_vector_path.shp')
pygeoprocessing.testing.assert_vectors_equal(
out_grid_vector_path, expected_grid_vector_path, 1E-6)
def test_hex_grid_regression(self):
"""Recreation hex grid regression test."""
from natcap.invest.recreation import recmodel_client
out_grid_vector_path = os.path.join(
self.workspace_dir, 'hex_grid_vector_path.shp')
recmodel_client._grid_vector(
os.path.join(SAMPLE_DATA, 'andros_aoi.shp'), 'hexagon', 20000.0,
out_grid_vector_path)
expected_grid_vector_path = os.path.join(
REGRESSION_DATA, 'hex_grid_vector_path.shp')
pygeoprocessing.testing.assert_vectors_equal(
out_grid_vector_path, expected_grid_vector_path, 1E-6)
@unittest.skip("skipping to avoid remote server call (issue #3753)")
def test_no_grid_regression(self):
"""Recreation base regression on ungridded AOI."""
from natcap.invest.recreation import recmodel_client
args = {
'aoi_path': os.path.join(SAMPLE_DATA, 'andros_aoi.shp'),
'compute_regression': False,
'start_year': '2005',
'end_year': '2014',
'grid_aoi': False,
'results_suffix': '',
'workspace_dir': self.workspace_dir,
}
recmodel_client.execute(args)
expected_result_table = pandas.read_csv(os.path.join(
REGRESSION_DATA, 'expected_monthly_table_for_no_grid.csv'))
result_table = pandas.read_csv(
os.path.join(self.workspace_dir, 'monthly_table.csv'))
pandas.testing.assert_frame_equal(
expected_result_table, result_table, check_dtype=False)
def test_predictor_id_too_long(self):
"""Recreation test ID too long raises ValueError."""
from natcap.invest.recreation import recmodel_client
args = {
'aoi_path': os.path.join(SAMPLE_DATA, 'andros_aoi.shp'),
'compute_regression': True,
'start_year': '2005',
'end_year': '2014',
'grid_aoi': True,
'grid_type': 'square',
'cell_size': 20000,
'predictor_table_path': os.path.join(
SAMPLE_DATA, 'predictors_id_too_long.csv'),
'results_suffix': '',
'workspace_dir': self.workspace_dir,
}
with self.assertRaises(ValueError):
recmodel_client.execute(args)
def test_existing_output_shapefiles(self):
"""Recreation grid test when output files need to be overwritten."""
from natcap.invest.recreation import recmodel_client
out_grid_vector_path = os.path.join(
self.workspace_dir, 'hex_grid_vector_path.shp')
recmodel_client._grid_vector(
os.path.join(SAMPLE_DATA, 'andros_aoi.shp'), 'hexagon', 20000.0,
out_grid_vector_path)
# overwrite output
recmodel_client._grid_vector(
os.path.join(SAMPLE_DATA, 'andros_aoi.shp'), 'hexagon', 20000.0,
out_grid_vector_path)
expected_grid_vector_path = os.path.join(
REGRESSION_DATA, 'hex_grid_vector_path.shp')
pygeoprocessing.testing.assert_vectors_equal(
out_grid_vector_path, expected_grid_vector_path, 1E-6)
def test_existing_regression_coef(self):
"""Recreation test regression coefficients handle existing output."""
from natcap.invest.recreation import recmodel_client
# Initialize a TaskGraph
taskgraph_db_dir = os.path.join(
self.workspace_dir, '_taskgraph_working_dir')
n_workers = -1 # single process mode.
task_graph = taskgraph.TaskGraph(taskgraph_db_dir, n_workers)
response_vector_path = os.path.join(
self.workspace_dir, 'no_grid_vector_path.shp')
response_polygons_lookup_path = os.path.join(
self.workspace_dir, 'response_polygons_lookup.pickle')
recmodel_client._copy_aoi_no_grid(
os.path.join(SAMPLE_DATA, 'andros_aoi.shp'), response_vector_path)
predictor_table_path = os.path.join(SAMPLE_DATA, 'predictors.csv')
# make outputs to be overwritten
predictor_dict = utils.build_lookup_from_csv(predictor_table_path, 'id')
predictor_list = predictor_dict.keys()
tmp_working_dir = tempfile.mkdtemp(dir=self.workspace_dir)
empty_json_list = [
os.path.join(tmp_working_dir, x + '.json') for x in predictor_list]
out_coefficient_vector_path = os.path.join(
self.workspace_dir, 'out_coefficient_vector.shp')
_make_empty_files(
[out_coefficient_vector_path] + empty_json_list)
prepare_response_polygons_task = task_graph.add_task(
func=recmodel_client._prepare_response_polygons_lookup,
args=(response_vector_path,
response_polygons_lookup_path),
target_path_list=[response_polygons_lookup_path],
task_name='prepare response polygons for geoprocessing')
# build again to test against overwriting output
recmodel_client._schedule_predictor_data_processing(
response_vector_path, response_polygons_lookup_path,
prepare_response_polygons_task, predictor_table_path,
out_coefficient_vector_path, tmp_working_dir, task_graph)
expected_coeff_vector_path = os.path.join(
REGRESSION_DATA, 'test_regression_coefficients.shp')
_assert_vector_attributes_eq(
out_coefficient_vector_path, expected_coeff_vector_path, 6)
def test_predictor_table_absolute_paths(self):
"""Recreation test validation from full path."""
from natcap.invest.recreation import recmodel_client
response_vector_path = os.path.join(
self.workspace_dir, 'no_grid_vector_path.shp')
recmodel_client._copy_aoi_no_grid(
os.path.join(SAMPLE_DATA, 'andros_aoi.shp'), response_vector_path)
predictor_table_path = os.path.join(
self.workspace_dir, 'predictors.csv')
# these are absolute paths for predictor data
predictor_list = [
('ports',
os.path.join(SAMPLE_DATA, 'predictors', 'dredged_ports.shp'),
'point_count'),
('airdist',
os.path.join(SAMPLE_DATA, 'predictors', 'airport.shp'),
'point_nearest_distance'),
('bonefish',
os.path.join(SAMPLE_DATA, 'predictors', 'bonefish_simp.shp'),
'polygon_percent_coverage'),
('bathy',
os.path.join(SAMPLE_DATA, 'predictors', 'dem90m_coarse.tif'),
'raster_mean'),
]
with open(predictor_table_path, 'w') as table_file:
table_file.write('id,path,type\n')
for predictor_id, path, predictor_type in predictor_list:
table_file.write(
'%s,%s,%s\n' % (predictor_id, path, predictor_type))
        # The expected behavior is that _validate_same_projection does not
        # raise a ValueError. The try/except block makes that explicit and
        # fails the test if it does. If a different exception is raised, the
        # test errors out instead, which distinguishes a test failure from a
        # test error.
try:
recmodel_client._validate_same_projection(
response_vector_path, predictor_table_path)
except ValueError:
self.fail(
"_validate_same_projection raised ValueError unexpectedly!")
def test_year_order(self):
"""Recreation ensure that end year < start year raise ValueError."""
from natcap.invest.recreation import recmodel_client
args = {
'aoi_path': os.path.join(SAMPLE_DATA, 'andros_aoi.shp'),
'cell_size': 7000.0,
'compute_regression': True,
'start_year': '2014', # note start_year > end_year
'end_year': '2005',
'grid_aoi': True,
'grid_type': 'hexagon',
'predictor_table_path': os.path.join(
SAMPLE_DATA, 'predictors.csv'),
'results_suffix': '',
'scenario_predictor_table_path': os.path.join(
SAMPLE_DATA, 'predictors_scenario.csv'),
'workspace_dir': self.workspace_dir,
}
with self.assertRaises(ValueError):
recmodel_client.execute(args)
def test_bad_grid_type(self):
"""Recreation ensure that bad grid type raises ValueError."""
from natcap.invest.recreation import recmodel_client
args = {
'aoi_path': os.path.join(SAMPLE_DATA, 'andros_aoi.shp'),
'cell_size': 7000.0,
'compute_regression': False,
'start_year': '2005',
'end_year': '2014',
'grid_aoi': True,
'grid_type': 'circle', # intentionally bad gridtype
'results_suffix': '',
'workspace_dir': self.workspace_dir,
}
with self.assertRaises(ValueError):
recmodel_client.execute(args)
def test_start_year_out_of_range(self):
"""Recreation that start_year out of range raise ValueError."""
from natcap.invest.recreation import recmodel_client
args = {
'aoi_path': os.path.join(SAMPLE_DATA, 'andros_aoi.shp'),
'cell_size': 7000.0,
'compute_regression': True,
'start_year': '1219', # start year ridiculously out of range
'end_year': '2014',
'grid_aoi': True,
'grid_type': 'hexagon',
'predictor_table_path': os.path.join(
SAMPLE_DATA, 'predictors.csv'),
'results_suffix': '',
'scenario_predictor_table_path': os.path.join(
SAMPLE_DATA, 'predictors_scenario.csv'),
'workspace_dir': self.workspace_dir,
}
with self.assertRaises(ValueError):
recmodel_client.execute(args)
def test_end_year_out_of_range(self):
"""Recreation that end_year out of range raise ValueError."""
from natcap.invest.recreation import recmodel_client
args = {
'aoi_path': os.path.join(SAMPLE_DATA, 'andros_aoi.shp'),
'cell_size': 7000.0,
'compute_regression': True,
'start_year': '2005',
'end_year': '2219', # end year ridiculously out of range
'grid_aoi': True,
'grid_type': 'hexagon',
'predictor_table_path': os.path.join(
SAMPLE_DATA, 'predictors.csv'),
'results_suffix': '',
'scenario_predictor_table_path': os.path.join(
SAMPLE_DATA, 'predictors_scenario.csv'),
'workspace_dir': self.workspace_dir,
}
with self.assertRaises(ValueError):
recmodel_client.execute(args)
class RecreationValidationTests(unittest.TestCase):
"""Tests for the Recreation Model ARGS_SPEC and validation."""
def setUp(self):
"""Create a temporary workspace."""
self.workspace_dir = tempfile.mkdtemp()
self.base_required_keys = [
'workspace_dir',
'aoi_path',
'start_year',
'end_year'
]
def tearDown(self):
"""Remove the temporary workspace after a test."""
shutil.rmtree(self.workspace_dir)
def test_missing_keys(self):
"""Recreation Validate: assert missing required keys."""
from natcap.invest.recreation import recmodel_client
from natcap.invest import validation
validation_errors = recmodel_client.validate({}) # empty args dict.
invalid_keys = validation.get_invalid_keys(validation_errors)
expected_missing_keys = set(self.base_required_keys)
self.assertEqual(invalid_keys, expected_missing_keys)
def test_missing_keys_grid_aoi(self):
"""Recreation Validate: assert missing keys for grid option."""
from natcap.invest.recreation import recmodel_client
from natcap.invest import validation
validation_errors = recmodel_client.validate({'grid_aoi': True})
invalid_keys = validation.get_invalid_keys(validation_errors)
expected_missing_keys = set(
self.base_required_keys + ['grid_type', 'cell_size'])
self.assertEqual(invalid_keys, expected_missing_keys)
def test_missing_keys_compute_regression(self):
"""Recreation Validate: assert missing keys for regression option."""
from natcap.invest.recreation import recmodel_client
from natcap.invest import validation
validation_errors = recmodel_client.validate({'compute_regression': True})
invalid_keys = validation.get_invalid_keys(validation_errors)
expected_missing_keys = set(
self.base_required_keys + ['predictor_table_path'])
self.assertEqual(invalid_keys, expected_missing_keys)
def test_bad_predictor_table_header(self):
"""Recreation Validate: assert messages for bad table headers."""
from natcap.invest.recreation import recmodel_client
table_path = os.path.join(self.workspace_dir, 'table.csv')
with open(table_path, 'w') as file:
file.write('foo,bar,baz\n')
file.write('a,b,c\n')
expected_message = "Fields are missing from this table: ['ID', 'PATH', 'TYPE']"
validation_warnings = recmodel_client.validate({
'compute_regression': True,
'predictor_table_path': table_path,
'start_year': '2012',
'end_year': '2016',
'workspace_dir': self.workspace_dir,
'aoi_path': os.path.join(SAMPLE_DATA, 'andros_aoi.shp')})
self.assertEqual(validation_warnings, [(['predictor_table_path'],
expected_message)])
validation_warnings = recmodel_client.validate({
'compute_regression': True,
'predictor_table_path': table_path,
'scenario_predictor_table_path': table_path,
'start_year': '2012',
'end_year': '2016',
'workspace_dir': self.workspace_dir,
'aoi_path': os.path.join(SAMPLE_DATA, 'andros_aoi.shp')})
self.assertEqual(validation_warnings, [(['predictor_table_path'],
expected_message),
(['scenario_predictor_table_path'],
expected_message)])
def test_validate_predictor_types_whitespace(self):
"""Recreation Validate: assert type validation ignores whitespace"""
from natcap.invest.recreation import recmodel_client
predictor_id = 'dem90m'
raster_path = os.path.join(SAMPLE_DATA, 'predictors/dem90m_coarse.tif')
# include trailing whitespace in the type, this should pass
table_path = os.path.join(self.workspace_dir, 'table.csv')
with open(table_path, 'w') as file:
file.write('id,path,type\n')
file.write(f'{predictor_id},{raster_path},raster_mean \n')
args = {
'aoi_path': os.path.join(SAMPLE_DATA, 'andros_aoi.shp'),
'cell_size': 40000.0,
'compute_regression': True,
'start_year': '2005',
'end_year': '2014',
'grid_aoi': False,
'predictor_table_path': table_path,
'workspace_dir': self.workspace_dir,
}
# there should be no error when the type has trailing whitespace
recmodel_client.execute(args)
output_path = os.path.join(self.workspace_dir, 'regression_coefficients.txt')
# the regression_coefficients.txt output file should contain the
# predictor id, meaning it wasn't dropped from the regression
with open(output_path, 'r') as output_file:
self.assertTrue(predictor_id in ''.join(output_file.readlines()))
def test_validate_predictor_types_incorrect(self):
"""Recreation Validate: assert error on incorrect type value"""
from natcap.invest.recreation import recmodel_client
predictor_id = 'dem90m'
raster_path = os.path.join(SAMPLE_DATA, 'predictors/dem90m_coarse.tif')
# include a typo in the type, this should fail
bad_table_path = os.path.join(self.workspace_dir, 'bad_table.csv')
with open(bad_table_path, 'w') as file:
file.write('id,path,type\n')
file.write(f'{predictor_id},{raster_path},raster?mean\n')
args = {
'aoi_path': os.path.join(SAMPLE_DATA, 'andros_aoi.shp'),
'cell_size': 40000.0,
'compute_regression': True,
'start_year': '2005',
'end_year': '2014',
'grid_aoi': False,
'predictor_table_path': bad_table_path,
'workspace_dir': self.workspace_dir,
}
with self.assertRaises(ValueError) as cm:
recmodel_client.execute(args)
self.assertTrue('The table contains invalid type value(s)' in
str(cm.exception))
def _assert_vector_attributes_eq(
actual_vector_path, expected_vector_path, tolerance_places=3):
"""Assert fieldnames and values are equal with no respect to order."""
try:
actual_vector = gdal.OpenEx(actual_vector_path, gdal.OF_VECTOR)
actual_layer = actual_vector.GetLayer()
expected_vector = gdal.OpenEx(expected_vector_path, gdal.OF_VECTOR)
expected_layer = expected_vector.GetLayer()
assert(
actual_layer.GetFeatureCount() == expected_layer.GetFeatureCount())
field_names = [field.name for field in expected_layer.schema]
for feature in expected_layer:
fid = feature.GetFID()
expected_values = [
feature.GetField(field) for field in field_names]
actual_feature = actual_layer.GetFeature(fid)
actual_values = [
actual_feature.GetField(field) for field in field_names]
for av, ev in zip(actual_values, expected_values):
if av is not None:
numpy.testing.assert_allclose(
av, ev, rtol=0, atol=10**-tolerance_places)
else:
# Could happen when a raster predictor is only nodata
assert(ev is None)
feature = None
actual_feature = None
finally:
actual_layer = None
actual_vector = None
expected_layer = None
expected_vector = None
def _assert_regression_results_eq(
workspace_dir, file_list_path, result_vector_path,
expected_results_path):
"""Test workspace against the expected list of files and results.
Parameters:
workspace_dir (string): path to the completed model workspace
file_list_path (string): path to a file that has a list of all
the expected files relative to the workspace base
result_vector_path (string): path to shapefile
produced by the Recreation model.
expected_results_path (string): path to a csv file that has the
expected results of a scenario prediction model run.
Returns:
None
Raises:
AssertionError if any files are missing or results are out of
range by `tolerance_places`
"""
try:
# Test that the workspace has the same files as we expect
_test_same_files(file_list_path, workspace_dir)
# The tolerance of 3 digits after the decimal was determined by
# experimentation on the application with the given range of
# numbers. This is an apparently reasonable approach as described
# by ChrisF: http://stackoverflow.com/a/3281371/42897
# and even more reading about picking numerical tolerance
# https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
tolerance_places = 3
result_vector = gdal.OpenEx(result_vector_path, gdal.OF_VECTOR)
result_layer = result_vector.GetLayer()
expected_results = pandas.read_csv(expected_results_path, dtype=float)
field_names = list(expected_results)
for feature in result_layer:
values = [feature.GetField(field) for field in field_names]
fid = feature.GetFID()
expected_values = list(expected_results.iloc[fid])
for v, ev in zip(values, expected_values):
if v is not None:
numpy.testing.assert_allclose(
v, ev, rtol=0, atol=10**-tolerance_places)
else:
# Could happen when a raster predictor is only nodata
assert(numpy.isnan(ev))
feature = None
finally:
result_layer = None
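        # Explicitly destroy the GDAL dataset object so its file handles are
        # released before the temporary workspace is cleaned up.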
gdal.Dataset.__swig_destroy__(result_vector)
result_vector = None
def _test_same_files(base_list_path, directory_path):
"""Assert expected files are in the `directory_path`.
Parameters:
base_list_path (string): a path to a file that has one relative
file path per line.
        directory_path (string): a path to a directory whose contents will
            be checked against the files listed in `base_list_path`
    Returns:
        None
    Raises:
        AssertionError when there are files listed in `base_list_path`
            that don't exist in the directory indicated by `directory_path`
"""
missing_files = []
with open(base_list_path, 'r') as file_list:
for file_path in file_list:
            file_path = file_path.rstrip()
            if file_path == '':
                # skip blank lines
                continue
            full_path = os.path.join(directory_path, file_path)
if not os.path.isfile(full_path):
missing_files.append(full_path)
if len(missing_files) > 0:
raise AssertionError(
"The following files were expected but not found: " +
'\n'.join(missing_files))
|
test_shell_interactive.py
|
#!/usr/bin/env impala-python
# encoding=utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import httplib
import logging
import os
import pexpect
import pytest
import re
import signal
import socket
import sys
import threading
from time import sleep
from contextlib import closing
# This import is the actual ImpalaShell class from impala_shell.py.
# We rename it to ImpalaShellClass here because we later import another
# class called ImpalaShell from tests/shell/util.py, and we don't want
# to mask it.
from shell.impala_shell import ImpalaShell as ImpalaShellClass
from tempfile import NamedTemporaryFile
from tests.common.impala_service import ImpaladService
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfLocal
from tests.common.test_dimensions import create_client_protocol_dimension
from util import (assert_var_substitution, ImpalaShell, get_impalad_port, get_shell_cmd,
get_open_sessions_metric, IMPALA_SHELL_EXECUTABLE)
import SimpleHTTPServer
import SocketServer
QUERY_FILE_PATH = os.path.join(os.environ['IMPALA_HOME'], 'tests', 'shell')
# Regex to match the interactive shell prompt that is expected after each command.
# Examples: hostname:21000, hostname:21050, hostname:28000
PROMPT_REGEX = r'\[[^:]+:2(1|8)0[0-9][0-9]\]'
LOG = logging.getLogger('test_shell_interactive')
@pytest.fixture
def tmp_history_file(request):
"""
Test fixture which uses a temporary file as the path for the shell
history.
"""
tmp = NamedTemporaryFile()
old_path = os.environ.get('IMPALA_HISTFILE')
os.environ['IMPALA_HISTFILE'] = tmp.name
def cleanup():
if old_path is not None:
os.environ['IMPALA_HISTFILE'] = old_path
else:
del os.environ['IMPALA_HISTFILE']
request.addfinalizer(cleanup)
return tmp.name
class UnavailableRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""An HTTP server that always returns 503"""
def do_POST(self):
self.send_response(code=httplib.SERVICE_UNAVAILABLE, message="Service Unavailable")
def get_unused_port():
""" Find an unused port http://stackoverflow.com/questions/1365265 """
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
class TestImpalaShellInteractive(ImpalaTestSuite):
"""Test the impala shell interactively"""
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
# Run with both beeswax and HS2 to ensure that behaviour is the same.
cls.ImpalaTestMatrix.add_dimension(create_client_protocol_dimension())
def _expect_with_cmd(self, proc, cmd, vector, expectations=(), db="default"):
"""Executes a command on the expect process instance and verifies a set of
assertions defined by the expectations."""
proc.sendline(cmd + ";")
proc.expect(":{0}] {1}>".format(get_impalad_port(vector), db))
if not expectations: return
for e in expectations:
assert e in proc.before
def _wait_for_num_open_sessions(self, vector, impala_service, expected, err):
"""Helper method to wait for the number of open sessions to reach 'expected'."""
metric_name = get_open_sessions_metric(vector)
try:
actual = impala_service.wait_for_metric_value(metric_name, expected)
except AssertionError:
LOG.exception("Error: " % err)
raise
assert actual == expected, err
def test_local_shell_options(self, vector):
"""Test that setting the local shell options works"""
shell_cmd = get_shell_cmd(vector)
proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:])
proc.expect(":{0}] default>".format(get_impalad_port(vector)))
self._expect_with_cmd(proc, "set", vector,
("LIVE_PROGRESS: True", "LIVE_SUMMARY: False"))
self._expect_with_cmd(proc, "set live_progress=true", vector)
self._expect_with_cmd(proc, "set", vector,
("LIVE_PROGRESS: True", "LIVE_SUMMARY: False"))
self._expect_with_cmd(proc, "set live_summary=1", vector)
self._expect_with_cmd(proc, "set", vector,
("LIVE_PROGRESS: True", "LIVE_SUMMARY: True"))
self._expect_with_cmd(proc, "set", vector,
("WRITE_DELIMITED: False", "VERBOSE: True"))
self._expect_with_cmd(proc, "set", vector,
("DELIMITER: \\t", "OUTPUT_FILE: None"))
self._expect_with_cmd(proc, "set write_delimited=true", vector)
self._expect_with_cmd(proc, "set", vector, ("WRITE_DELIMITED: True", "VERBOSE: True"))
self._expect_with_cmd(proc, "set DELIMITER=,", vector)
self._expect_with_cmd(proc, "set", vector, ("DELIMITER: ,", "OUTPUT_FILE: None"))
self._expect_with_cmd(proc, "set output_file=/tmp/clmn.txt", vector)
self._expect_with_cmd(proc, "set", vector,
("DELIMITER: ,", "OUTPUT_FILE: /tmp/clmn.txt"))
proc.sendeof()
proc.wait()
@pytest.mark.execute_serially
def test_write_delimited(self, vector):
"""Test output rows in delimited mode"""
p = ImpalaShell(vector)
p.send_cmd("use tpch")
p.send_cmd("set write_delimited=true")
p.send_cmd("select * from nation")
result = p.get_result()
assert "+----------------+" not in result.stdout
assert "21\tVIETNAM\t2" in result.stdout
@pytest.mark.execute_serially
def test_change_delimiter(self, vector):
"""Test change output delimiter if delimited mode is enabled"""
p = ImpalaShell(vector)
p.send_cmd("use tpch")
p.send_cmd("set write_delimited=true")
p.send_cmd("set delimiter=,")
p.send_cmd("select * from nation")
result = p.get_result()
assert "21,VIETNAM,2" in result.stdout
@pytest.mark.execute_serially
def test_print_to_file(self, vector):
"""Test print to output file and unset"""
# test print to file
p1 = ImpalaShell(vector)
p1.send_cmd("use tpch")
local_file = NamedTemporaryFile(delete=True)
p1.send_cmd("set output_file=%s" % local_file.name)
p1.send_cmd("select * from nation")
result = p1.get_result()
assert "VIETNAM" not in result.stdout
with open(local_file.name, "r") as fi:
# check if the results were written to the file successfully
result = fi.read()
assert "VIETNAM" in result
# test unset to print back to stdout
p2 = ImpalaShell(vector)
p2.send_cmd("use tpch")
p2.send_cmd("set output_file=%s" % local_file.name)
p2.send_cmd("unset output_file")
p2.send_cmd("select * from nation")
result = p2.get_result()
assert "VIETNAM" in result.stdout
def test_compute_stats_with_live_progress_options(self, vector, unique_database):
"""Test that setting LIVE_PROGRESS options won't cause COMPUTE STATS query fail"""
p = ImpalaShell(vector)
p.send_cmd("set live_progress=True")
p.send_cmd("set live_summary=True")
table = "{0}.live_progress_option".format(unique_database)
p.send_cmd('create table {0}(col int);'.format(table))
try:
p.send_cmd('compute stats {0};'.format(table))
finally:
p.send_cmd('drop table if exists {0};'.format(table))
result = p.get_result()
assert "Updated 1 partition(s) and 1 column(s)" in result.stdout
def test_escaped_quotes(self, vector):
"""Test escaping quotes"""
# test escaped quotes outside of quotes
result = run_impala_shell_interactive(vector, "select \\'bc';")
assert "Unexpected character" in result.stderr
result = run_impala_shell_interactive(vector, "select \\\"bc\";")
assert "Unexpected character" in result.stderr
# test escaped quotes within quotes
result = run_impala_shell_interactive(vector, "select 'ab\\'c';")
assert "Fetched 1 row(s)" in result.stderr
result = run_impala_shell_interactive(vector, "select \"ab\\\"c\";")
assert "Fetched 1 row(s)" in result.stderr
@pytest.mark.execute_serially
def test_cancellation(self, vector):
impalad = ImpaladService(socket.getfqdn())
assert impalad.wait_for_num_in_flight_queries(0)
command = "select sleep(10000);"
p = ImpalaShell(vector)
p.send_cmd(command)
sleep(3)
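    # Give the query a few seconds to start executing before sending SIGINT,
    # simulating a user pressing Ctrl-C in the shell.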
os.kill(p.pid(), signal.SIGINT)
result = p.get_result()
assert "Cancelled" not in result.stderr
assert impalad.wait_for_num_in_flight_queries(0)
p = ImpalaShell(vector)
sleep(3)
os.kill(p.pid(), signal.SIGINT)
result = p.get_result()
assert "^C" in result.stderr
@pytest.mark.execute_serially
def test_cancellation_mid_command(self, vector):
"""The test starts with sending in a multi-line input without a command delimiter.
When the impala-shell is waiting for more input, the test sends a SIGINT signal (to
simulate pressing Ctrl-C) followed by a final query terminated with semicolon.
The expected behavior for the impala shell is to discard everything before the
SIGINT signal was sent and execute the final query only."""
shell_cmd = get_shell_cmd(vector)
queries = [
"line 1\n", "line 2\n", "line 3\n\n", "line 4 and", " 5\n",
"line 6\n", "line 7\n", "line 8\n", "line 9\n", "line 10"]
# Check when the last line before Ctrl-C doesn't end with newline.
child_proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:])
for query in queries:
child_proc.send(query)
child_proc.sendintr()
child_proc.send('select "test without newline";\n')
child_proc.expect("test without newline")
child_proc.sendline('quit;')
child_proc.wait()
# Check when the last line before Ctrl-C ends with newline.
child_proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:])
for query in queries:
child_proc.send(query)
# Sending in a newline so it will end with one
child_proc.send("\n")
    # checking if it really is a new line
child_proc.expect(" > ")
child_proc.sendintr()
child_proc.send('select "test with newline";\n')
child_proc.expect("test with newline")
child_proc.sendline('quit;')
child_proc.wait()
def test_unicode_input(self, vector):
"Test queries containing non-ascii input"
# test a unicode query spanning multiple lines
unicode_text = u'\ufffd'
args = "select '%s'\n;" % unicode_text.encode('utf-8')
result = run_impala_shell_interactive(vector, args)
assert "Fetched 1 row(s)" in result.stderr
def test_welcome_string(self, vector):
"""Test that the shell's welcome message is only printed once
when the shell is started. Ensure it is not reprinted on errors.
Regression test for IMPALA-1153
"""
result = run_impala_shell_interactive(vector, 'asdf;')
assert result.stdout.count("Welcome to the Impala shell") == 1
result = run_impala_shell_interactive(vector, 'select * from non_existent_table;')
assert result.stdout.count("Welcome to the Impala shell") == 1
def test_disconnected_shell(self, vector):
"""Test that the shell presents a disconnected prompt if it can't connect
"""
result = run_impala_shell_interactive(vector, 'asdf;', shell_args=['-ifoo'],
wait_until_connected=False)
assert ImpalaShellClass.DISCONNECTED_PROMPT in result.stdout, result.stderr
def test_quit_no_reconnect(self, vector):
"""Test that a disconnected shell does not try to reconnect if quitting"""
result = run_impala_shell_interactive(vector, 'quit;', shell_args=['-ifoo'],
wait_until_connected=False)
assert "reconnect" not in result.stderr
result = run_impala_shell_interactive(vector, 'exit;', shell_args=['-ifoo'],
wait_until_connected=False)
assert "reconnect" not in result.stderr
# Null case: This is not quitting, so it will result in an attempt to reconnect.
result = run_impala_shell_interactive(vector, 'show tables;', shell_args=['-ifoo'],
wait_until_connected=False)
assert "reconnect" in result.stderr
def test_bash_cmd_timing(self, vector):
"""Test existence of time output in bash commands run from shell"""
args = ["! ls;"]
result = run_impala_shell_interactive(vector, args)
assert "Executed in" in result.stderr
@SkipIfLocal.multiple_impalad
@pytest.mark.execute_serially
def test_reconnect(self, vector):
"""Regression Test for IMPALA-1235
Verifies that a connect command by the user is honoured.
"""
try:
# Disconnect existing clients so there are no open sessions.
self.close_impala_clients()
hostname = socket.getfqdn()
initial_impala_service = ImpaladService(hostname)
target_impala_service = ImpaladService(hostname, webserver_port=25001,
beeswax_port=21001, be_port=22001, hs2_port=21051, hs2_http_port=28001)
protocol = vector.get_value("protocol").lower()
if protocol == "hs2":
target_port = 21051
elif protocol == "hs2-http":
target_port = 28001
else:
assert protocol == "beeswax"
target_port = 21001
# This test is running serially, so there shouldn't be any open sessions, but wait
# here in case a session from a previous test hasn't been fully closed yet.
self._wait_for_num_open_sessions(vector, initial_impala_service, 0,
"first impalad should not have any remaining open sessions.")
self._wait_for_num_open_sessions(vector, target_impala_service, 0,
"second impalad should not have any remaining open sessions.")
# Connect to the first impalad
p = ImpalaShell(vector)
# Make sure we're connected <hostname>:<port>
self._wait_for_num_open_sessions(vector, initial_impala_service, 1,
"Not connected to %s:%d" % (hostname, get_impalad_port(vector)))
p.send_cmd("connect %s:%d" % (hostname, target_port))
# The number of sessions on the target impalad should have been incremented.
self._wait_for_num_open_sessions(vector,
target_impala_service, 1, "Not connected to %s:%d" % (hostname, target_port))
assert "[%s:%d] default>" % (hostname, target_port) in p.get_result().stdout
# The number of sessions on the initial impalad should have been decremented.
self._wait_for_num_open_sessions(vector, initial_impala_service, 0,
"Connection to %s:%d should have been closed" % (
hostname, get_impalad_port(vector)))
finally:
self.create_impala_clients()
@pytest.mark.execute_serially
def test_ddl_queries_are_closed(self, vector):
"""Regression test for IMPALA-1317
The shell does not call close() for alter, use and drop queries, leaving them in
flight. This test issues those queries in interactive mode, and checks the debug
webpage to confirm that they've been closed.
TODO: Add every statement type.
"""
# Disconnect existing clients so there are no open sessions.
self.close_impala_clients()
TMP_DB = 'inflight_test_db'
TMP_TBL = 'tmp_tbl'
MSG = '%s query should be closed'
NUM_QUERIES = 'impala-server.num-queries'
impalad = ImpaladService(socket.getfqdn())
self._wait_for_num_open_sessions(vector, impalad, 0,
"Open sessions found after closing all clients.")
p = ImpalaShell(vector)
try:
start_num_queries = impalad.get_metric_value(NUM_QUERIES)
p.send_cmd('create database if not exists %s' % TMP_DB)
p.send_cmd('use %s' % TMP_DB)
impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 2)
assert impalad.wait_for_num_in_flight_queries(0), MSG % 'use'
p.send_cmd('create table %s(i int)' % TMP_TBL)
p.send_cmd('alter table %s add columns (j int)' % TMP_TBL)
impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 4)
assert impalad.wait_for_num_in_flight_queries(0), MSG % 'alter'
p.send_cmd('drop table %s' % TMP_TBL)
impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 5)
assert impalad.wait_for_num_in_flight_queries(0), MSG % 'drop'
finally:
# get_result() must be called to exit the shell.
p.get_result()
self._wait_for_num_open_sessions(vector, impalad, 0,
"shell should close sessions.")
run_impala_shell_interactive(vector, "drop table if exists %s.%s;" % (
TMP_DB, TMP_TBL))
run_impala_shell_interactive(vector, "drop database if exists foo;")
self.create_impala_clients()
def test_multiline_queries_in_history(self, vector, tmp_history_file):
"""Test to ensure that multiline queries with comments are preserved in history
Ensure that multiline queries are preserved when they're read back from history.
Additionally, also test that comments are preserved.
"""
# readline gets its input from tty, so using stdin does not work.
shell_cmd = get_shell_cmd(vector)
child_proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:])
# List of (input query, expected text in output).
# The expected output is usually the same as the input with a number prefix, except
# where the shell strips newlines before a semicolon.
queries = [
("select\n1;--comment", "[1]: select\n1;--comment"),
("select 1 --comment\n;", "[2]: select 1 --comment;"),
("select 1 --comment\n\n\n;", "[3]: select 1 --comment;"),
("select /*comment*/\n1;", "[4]: select /*comment*/\n1;"),
("select\n/*comm\nent*/\n1;", "[5]: select\n/*comm\nent*/\n1;")]
for query, _ in queries:
child_proc.expect(PROMPT_REGEX)
child_proc.sendline(query)
child_proc.expect("Fetched 1 row\(s\) in [0-9]+\.?[0-9]*s")
child_proc.expect(PROMPT_REGEX)
child_proc.sendline('quit;')
child_proc.wait()
p = ImpalaShell(vector)
p.send_cmd('history')
result = p.get_result()
for _, history_entry in queries:
assert history_entry in result.stderr, "'%s' not in '%s'" % (history_entry,
result.stderr)
def test_history_file_option(self, vector, tmp_history_file):
"""
Setting the 'tmp_history_file' fixture above means that the IMPALA_HISTFILE
environment will be overridden. Here we override that environment by passing
the --history_file command line option, ensuring that the history ends up
in the appropriate spot.
"""
with NamedTemporaryFile() as new_hist:
shell_cmd = get_shell_cmd(vector) + ["--history_file=%s" % new_hist.name]
child_proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:])
child_proc.expect(":{0}] default>".format(get_impalad_port(vector)))
self._expect_with_cmd(child_proc, "select 'hi'", vector, ('hi'))
child_proc.sendline('exit;')
child_proc.expect(pexpect.EOF)
history_contents = file(new_hist.name).read()
assert "select 'hi'" in history_contents
def test_rerun(self, vector, tmp_history_file):
"""Smoke test for the 'rerun' command"""
shell_cmd = get_shell_cmd(vector)
child_proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:])
child_proc.expect(":{0}] default>".format(get_impalad_port(vector)))
self._expect_with_cmd(child_proc, "@1", vector, ("Command index out of range"))
self._expect_with_cmd(child_proc, "rerun -1", vector,
("Command index out of range"))
self._expect_with_cmd(child_proc, "select 'first_command'", vector,
("first_command"))
self._expect_with_cmd(child_proc, "rerun 1", vector, ("first_command"))
self._expect_with_cmd(child_proc, "@ -1", vector, ("first_command"))
self._expect_with_cmd(child_proc, "select 'second_command'", vector,
("second_command"))
child_proc.sendline('history;')
child_proc.expect(":{0}] default>".format(get_impalad_port(vector)))
assert '[1]: select \'first_command\';' in child_proc.before
assert '[2]: select \'second_command\';' in child_proc.before
assert '[3]: history;' in child_proc.before
# Rerunning command should not add an entry into history.
assert '[4]' not in child_proc.before
self._expect_with_cmd(child_proc, "@0", vector, ("Command index out of range"))
self._expect_with_cmd(child_proc, "rerun 4", vector, ("Command index out of range"))
self._expect_with_cmd(child_proc, "@-4", vector, ("Command index out of range"))
self._expect_with_cmd(child_proc, " @ 3 ", vector, ("second_command"))
self._expect_with_cmd(child_proc, "@-3", vector, ("first_command"))
self._expect_with_cmd(child_proc, "@", vector,
("Command index to be rerun must be an integer."))
self._expect_with_cmd(child_proc, "@1foo", vector,
("Command index to be rerun must be an integer."))
self._expect_with_cmd(child_proc, "@1 2", vector,
("Command index to be rerun must be an integer."))
self._expect_with_cmd(child_proc, "rerun1", vector, ("Syntax error"))
child_proc.sendline('quit;')
child_proc.wait()
def test_tip(self, vector):
"""Smoke test for the TIP command"""
# Temporarily add impala_shell module to path to get at TIPS list for verification
sys.path.append("%s/shell/" % os.environ['IMPALA_HOME'])
try:
import impala_shell
finally:
sys.path = sys.path[:-1]
result = run_impala_shell_interactive(vector, "tip;")
for t in impala_shell.TIPS:
if t in result.stderr: return
assert False, "No tip found in output %s" % result.stderr
def test_var_substitution(self, vector):
cmds = open(os.path.join(QUERY_FILE_PATH, 'test_var_substitution.sql')).read()
args = ["--var=foo=123", "--var=BAR=456", "--delimited", "--output_delimiter= "]
result = run_impala_shell_interactive(vector, cmds, shell_args=args)
assert_var_substitution(result)
def test_query_option_configuration(self, vector):
rcfile_path = os.path.join(QUERY_FILE_PATH, 'impalarc_with_query_options')
args = ['-Q', 'MT_dop=1', '--query_option=MAX_ERRORS=200',
'--config_file=%s' % rcfile_path]
cmds = "set all;"
result = run_impala_shell_interactive(vector, cmds, shell_args=args)
assert "\tMT_DOP: 1" in result.stdout
assert "\tMAX_ERRORS: 200" in result.stdout
assert "\tEXPLAIN_LEVEL: 2" in result.stdout
assert "INVALID_QUERY_OPTION is not supported for the impalad being connected to, "\
"ignoring." in result.stdout
# Verify that query options under [impala] override those under [impala.query_options]
assert "\tDEFAULT_FILE_FORMAT: avro" in result.stdout
def test_commandline_flag_disable_live_progress(self, vector):
"""Test the command line flag disable_live_progress with live_progress."""
# By default, shell option live_progress is set to True in the interactive mode.
cmds = "set all;"
result = run_impala_shell_interactive(vector, cmds)
assert "\tLIVE_PROGRESS: True" in result.stdout
# override the default option through command line argument.
args = ['--disable_live_progress']
result = run_impala_shell_interactive(vector, cmds, shell_args=args)
assert "\tLIVE_PROGRESS: False" in result.stdout
# set live_progress as True with config file.
# override the option in config file through command line argument.
rcfile_path = os.path.join(QUERY_FILE_PATH, 'good_impalarc3')
args = ['--disable_live_progress', '--config_file=%s' % rcfile_path]
result = run_impala_shell_interactive(vector, cmds, shell_args=args)
assert "\tLIVE_PROGRESS: False" in result.stdout
def test_live_option_configuration(self, vector):
"""Test the optional configuration file with live_progress and live_summary."""
# Positive tests
# set live_summary and live_progress as True with config file
rcfile_path = os.path.join(QUERY_FILE_PATH, 'good_impalarc3')
args = ['--config_file=%s' % rcfile_path]
cmds = "set all;"
result = run_impala_shell_interactive(vector, cmds, shell_args=args)
assert 'WARNING:' not in result.stderr, \
"A valid config file should not trigger any warning: {0}".format(result.stderr)
assert "\tLIVE_SUMMARY: True" in result.stdout
assert "\tLIVE_PROGRESS: True" in result.stdout
# set live_summary and live_progress as False with config file
rcfile_path = os.path.join(QUERY_FILE_PATH, 'good_impalarc4')
args = ['--config_file=%s' % rcfile_path]
result = run_impala_shell_interactive(vector, cmds, shell_args=args)
assert 'WARNING:' not in result.stderr, \
"A valid config file should not trigger any warning: {0}".format(result.stderr)
assert "\tLIVE_SUMMARY: False" in result.stdout
assert "\tLIVE_PROGRESS: False" in result.stdout
# override options in config file through command line arguments
args = ['--live_progress', '--live_summary', '--config_file=%s' % rcfile_path]
result = run_impala_shell_interactive(vector, cmds, shell_args=args)
assert "\tLIVE_SUMMARY: True" in result.stdout
assert "\tLIVE_PROGRESS: True" in result.stdout
def test_source_file(self, vector):
cwd = os.getcwd()
try:
# Change working dir so that SOURCE command in shell.cmds can find shell2.cmds.
os.chdir("%s/tests/shell/" % os.environ['IMPALA_HOME'])
# IMPALA-5416: Test that a command following 'source' won't be run twice.
result = run_impala_shell_interactive(vector, "source shell.cmds;select \"second "
"command\";")
assert "Query: USE FUNCTIONAL" in result.stderr
assert "Query: SHOW TABLES" in result.stderr
assert "alltypes" in result.stdout
# This is from shell2.cmds, the result of sourcing a file from a sourced file.
assert "SELECT VERSION()" in result.stderr
assert "version()" in result.stdout
assert len(re.findall("'second command'", result.stdout)) == 1
# IMPALA-5416: Test that two source commands on a line won't crash the shell.
result = run_impala_shell_interactive(
vector, "source shell.cmds;source shell.cmds;")
assert len(re.findall("version\(\)", result.stdout)) == 2
finally:
os.chdir(cwd)
def test_source_file_with_errors(self, vector):
full_path = "%s/tests/shell/shell_error.cmds" % os.environ['IMPALA_HOME']
result = run_impala_shell_interactive(vector, "source %s;" % full_path)
assert "Could not execute command: USE UNKNOWN_DATABASE" in result.stderr
assert "Query: USE FUNCTIONAL" not in result.stderr
result = run_impala_shell_interactive(vector, "source %s;" % full_path, ['-c'])
assert "Could not execute command: USE UNKNOWN_DATABASE" in result.stderr,\
result.stderr
assert "Query: USE FUNCTIONAL" in result.stderr, result.stderr
assert "Query: SHOW TABLES" in result.stderr, result.stderr
assert "alltypes" in result.stdout, result.stdout
def test_source_missing_file(self, vector):
full_path = "%s/tests/shell/doesntexist.cmds" % os.environ['IMPALA_HOME']
result = run_impala_shell_interactive(vector, "source %s;" % full_path)
assert "No such file or directory" in result.stderr
def test_zero_row_fetch(self, vector):
# IMPALA-4418: DROP and USE are generally exceptional statements where
# the client does not fetch. For statements returning 0 rows we do not
# want an empty line in stdout.
result = run_impala_shell_interactive(vector, "-- foo \n use default;")
assert re.search('> \[', result.stdout)
result = run_impala_shell_interactive(vector,
"select * from functional.alltypes limit 0;")
assert "Fetched 0 row(s)" in result.stderr
assert re.search('> \[', result.stdout)
def test_set_and_set_all(self, vector):
"""IMPALA-2181. Tests the outputs of SET and SET ALL commands. SET should contain the
REGULAR and ADVANCED options only. SET ALL should contain all the options grouped by
display level."""
shell1 = ImpalaShell(vector)
shell1.send_cmd("set")
result = shell1.get_result()
assert "Query options (defaults shown in []):" in result.stdout
assert "ABORT_ON_ERROR" in result.stdout
assert "Advanced Query Options:" in result.stdout
assert "APPX_COUNT_DISTINCT" in result.stdout
assert vector.get_value("protocol") in ("hs2", "hs2-http")\
or "SUPPORT_START_OVER" in result.stdout
# Development, deprecated and removed options should not be shown.
# Note: there are currently no deprecated options
assert "Development Query Options:" not in result.stdout
assert "DEBUG_ACTION" not in result.stdout # Development option.
assert "MAX_IO_BUFFERS" not in result.stdout # Removed option.
shell2 = ImpalaShell(vector)
shell2.send_cmd("set all")
result = shell2.get_result()
assert "Query options (defaults shown in []):" in result.stdout
assert "Advanced Query Options:" in result.stdout
assert "Development Query Options:" in result.stdout
assert "Deprecated Query Options:" not in result.stdout
advanced_part_start_idx = result.stdout.find("Advanced Query Options")
development_part_start_idx = result.stdout.find("Development Query Options")
deprecated_part_start_idx = result.stdout.find("Deprecated Query Options")
advanced_part = result.stdout[advanced_part_start_idx:development_part_start_idx]
development_part = result.stdout[development_part_start_idx:deprecated_part_start_idx]
assert "ABORT_ON_ERROR" in result.stdout[:advanced_part_start_idx]
assert "APPX_COUNT_DISTINCT" in advanced_part
assert vector.get_value("protocol") in ("hs2", "hs2-http")\
or "SUPPORT_START_OVER" in advanced_part
assert "DEBUG_ACTION" in development_part
# Removed options should not be shown.
assert "MAX_IO_BUFFERS" not in result.stdout
def check_command_case_sensitivity(self, vector, command, expected):
shell = ImpalaShell(vector)
shell.send_cmd(command)
assert expected in shell.get_result().stderr
def test_unexpected_conversion_for_literal_string_to_lowercase(self, vector):
# IMPALA-4664: Impala shell can accidentally convert certain literal
# strings to lowercase. Impala shell splits each command into tokens
# and then converts the first token to lowercase to figure out how it
# should execute the command. The splitting is done by spaces only.
# Thus, if the user types a TAB after the SELECT, the first token after
# the split becomes the SELECT plus whatever comes after it.
result = run_impala_shell_interactive(vector, "select'MUST_HAVE_UPPER_STRING'")
assert re.search('MUST_HAVE_UPPER_STRING', result.stdout)
result = run_impala_shell_interactive(vector, "select\t'MUST_HAVE_UPPER_STRING'")
assert re.search('MUST_HAVE_UPPER_STRING', result.stdout)
result = run_impala_shell_interactive(vector, "select\n'MUST_HAVE_UPPER_STRING'")
assert re.search('MUST_HAVE_UPPER_STRING', result.stdout)
def test_case_sensitive_command(self, vector):
# IMPALA-2640: Make a given command case-sensitive
cwd = os.getcwd()
try:
self.check_command_case_sensitivity(vector, "sElEcT VERSION()", "Query: sElEcT")
self.check_command_case_sensitivity(vector, "sEt VaR:FoO=bOo", "Variable FOO")
self.check_command_case_sensitivity(vector, "sHoW tables", "Query: sHoW")
# Change working dir so that SOURCE command in shell_case_sensitive.cmds can
# find shell_case_sensitive2.cmds.
os.chdir("%s/tests/shell/" % os.environ['IMPALA_HOME'])
result = run_impala_shell_interactive(vector,
"sOuRcE shell_case_sensitive.cmds; SeLeCt 'second command'")
print result.stderr
assert "Query: uSe FUNCTIONAL" in result.stderr
assert "Query: ShOw TABLES" in result.stderr
assert "alltypes" in result.stdout
# This is from shell_case_sensitive2.cmds, the result of sourcing a file
# from a sourced file.
print result.stderr
assert "SeLeCt 'second command'" in result.stderr
finally:
os.chdir(cwd)
def test_line_with_leading_comment(self, vector, unique_database):
# IMPALA-2195: A line with a comment produces incorrect command.
table = "{0}.leading_comment".format(unique_database)
run_impala_shell_interactive(vector, 'create table {0} (i int);'.format(table))
result = run_impala_shell_interactive(vector, '-- comment\n'
'insert into {0} values(1);'.format(table))
assert 'Modified 1 row(s)' in result.stderr
result = run_impala_shell_interactive(vector, '-- comment\n'
'select * from {0};'.format(table))
assert 'Fetched 1 row(s)' in result.stderr
result = run_impala_shell_interactive(vector, '--한글\n'
'select * from {0};'.format(table))
assert 'Fetched 1 row(s)' in result.stderr
result = run_impala_shell_interactive(vector, '/* 한글 */\n'
'select * from {0};'.format(table))
assert 'Fetched 1 row(s)' in result.stderr
result = run_impala_shell_interactive(vector, '/* comment */\n'
'select * from {0};'.format(table))
assert 'Fetched 1 row(s)' in result.stderr
result = run_impala_shell_interactive(vector, '/* comment1 */\n'
'-- comment2\n'
'select * from {0};'.format(table))
assert 'Fetched 1 row(s)' in result.stderr
result = run_impala_shell_interactive(vector, '/* comment1\n'
'comment2 */ select * from {0};'.format(table))
assert 'Fetched 1 row(s)' in result.stderr
result = run_impala_shell_interactive(vector, '/* select * from {0} */ '
'select * from {0};'.format(table))
assert 'Fetched 1 row(s)' in result.stderr
result = run_impala_shell_interactive(vector, '/* comment */ help use')
assert 'Executes a USE... query' in result.stdout
result = run_impala_shell_interactive(vector, '-- comment\n'
' help use;')
assert 'Executes a USE... query' in result.stdout
result = run_impala_shell_interactive(vector, '/* comment1 */\n'
'-- comment2\n'
'desc {0};'.format(table))
assert 'Fetched 1 row(s)' in result.stderr
result = run_impala_shell_interactive(vector, '/* comment1 */\n'
'-- comment2\n'
'help use;')
assert 'Executes a USE... query' in result.stdout
def test_line_ends_with_comment(self, vector):
# IMPALA-5269: Test lines that end with a comment.
queries = ['select 1 + 1; --comment',
'select 1 + 1 --comment\n;']
for query in queries:
result = run_impala_shell_interactive(vector, query)
assert '| 1 + 1 |' in result.stdout
assert '| 2 |' in result.stdout
queries = ['select \'some string\'; --comment',
'select \'some string\' --comment\n;']
for query in queries:
result = run_impala_shell_interactive(vector, query)
assert '| \'some string\' |' in result.stdout
assert '| some string |' in result.stdout
queries = ['select "--"; -- "--"',
'select \'--\'; -- "--"',
'select "--" -- "--"\n;',
'select \'--\' -- "--"\n;']
for query in queries:
result = run_impala_shell_interactive(vector, query)
assert '| \'--\' |' in result.stdout
assert '| -- |' in result.stdout
query = ('select * from (\n' +
'select count(*) from functional.alltypes\n' +
') v; -- Incomplete SQL statement in this line')
result = run_impala_shell_interactive(vector, query)
assert '| count(*) |' in result.stdout
query = ('select id from functional.alltypes\n' +
'order by id; /*\n' +
'* Multi-line comment\n' +
'*/')
result = run_impala_shell_interactive(vector, query)
assert '| id |' in result.stdout
def test_fix_infinite_loop(self, vector):
# IMPALA-6337: Fix infinite loop.
result = run_impala_shell_interactive(vector, "select 1 + 1; \"\n;\";")
assert '| 2 |' in result.stdout
result = run_impala_shell_interactive(vector, "select '1234'\";\n;\n\";")
assert '| 1234 |' in result.stdout
result = run_impala_shell_interactive(vector, "select 1 + 1; \"\n;\"\n;")
assert '| 2 |' in result.stdout
result = run_impala_shell_interactive(vector, "select '1\\'23\\'4'\";\n;\n\";")
assert '| 1\'23\'4 |' in result.stdout
result = run_impala_shell_interactive(vector, "select '1\"23\"4'\";\n;\n\";")
assert '| 1"23"4 |' in result.stdout
def test_comment_with_quotes(self, vector):
# IMPALA-2751: Comment does not need to have matching quotes
queries = [
"select -- '\n1;",
'select -- "\n1;',
"select -- \"'\n 1;",
"select /*'\n*/ 1;",
'select /*"\n*/ 1;',
"select /*\"'\n*/ 1;",
"with a as (\nselect 1\n-- '\n) select * from a",
'with a as (\nselect 1\n-- "\n) select * from a',
"with a as (\nselect 1\n-- '\"\n) select * from a",
]
for query in queries:
result = run_impala_shell_interactive(vector, query)
assert '| 1 |' in result.stdout
def test_shell_prompt(self, vector):
shell_cmd = get_shell_cmd(vector)
proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:])
proc.expect(":{0}] default>".format(get_impalad_port(vector)))
self._expect_with_cmd(proc, "use foo", vector, (), 'default')
self._expect_with_cmd(proc, "use functional", vector, (), 'functional')
self._expect_with_cmd(proc, "use foo", vector, (), 'functional')
self._expect_with_cmd(proc, 'use `tpch`', vector, (), 'tpch')
self._expect_with_cmd(proc, 'use ` tpch `', vector, (), 'tpch')
proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:] + ['-d', 'functional'])
proc.expect(":{0}] functional>".format(get_impalad_port(vector)))
self._expect_with_cmd(proc, "use foo", vector, (), 'functional')
self._expect_with_cmd(proc, "use tpch", vector, (), 'tpch')
self._expect_with_cmd(proc, "use foo", vector, (), 'tpch')
proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:] + ['-d', ' functional '])
proc.expect(":{0}] functional>".format(get_impalad_port(vector)))
proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:] + ['-d', '` functional `'])
proc.expect(":{0}] functional>".format(get_impalad_port(vector)))
# Start an Impala shell with an invalid DB.
proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:] + ['-d', 'foo'])
proc.expect(":{0}] default>".format(get_impalad_port(vector)))
self._expect_with_cmd(proc, "use foo", vector, (), 'default')
self._expect_with_cmd(proc, "use functional", vector, (), 'functional')
self._expect_with_cmd(proc, "use foo", vector, (), 'functional')
proc.sendeof()
proc.wait()
def test_strip_leading_comment(self, vector):
"""Test stripping leading comments from SQL statements"""
assert ('--delete\n', 'select 1') == \
ImpalaShellClass.strip_leading_comment('--delete\nselect 1')
assert ('--delete\n', 'select --do not delete\n1') == \
ImpalaShellClass.strip_leading_comment('--delete\nselect --do not delete\n1')
assert (None, 'select --do not delete\n1') == \
ImpalaShellClass.strip_leading_comment('select --do not delete\n1')
assert ('/*delete*/\n', 'select 1') == \
ImpalaShellClass.strip_leading_comment('/*delete*/\nselect 1')
assert ('/*delete\nme*/\n', 'select 1') == \
ImpalaShellClass.strip_leading_comment('/*delete\nme*/\nselect 1')
assert ('/*delete\nme*/\n', 'select 1') == \
ImpalaShellClass.strip_leading_comment('/*delete\nme*/\nselect 1')
assert ('/*delete*/', 'select 1') == \
ImpalaShellClass.strip_leading_comment('/*delete*/select 1')
assert ('/*delete*/ ', 'select /*do not delete*/ 1') == \
ImpalaShellClass.strip_leading_comment('/*delete*/ select /*do not delete*/ 1')
assert ('/*delete1*/ \n/*delete2*/ \n--delete3 \n', 'select /*do not delete*/ 1') == \
ImpalaShellClass.strip_leading_comment('/*delete1*/ \n'
'/*delete2*/ \n'
'--delete3 \n'
'select /*do not delete*/ 1')
assert (None, 'select /*do not delete*/ 1') == \
ImpalaShellClass.strip_leading_comment('select /*do not delete*/ 1')
assert ('/*delete*/\n', 'select c1 from\n'
'a\n'
'join -- +SHUFFLE\n'
'b') == \
ImpalaShellClass.strip_leading_comment('/*delete*/\n'
'select c1 from\n'
'a\n'
'join -- +SHUFFLE\n'
'b')
assert ('/*delete*/\n', 'select c1 from\n'
'a\n'
'join /* +SHUFFLE */\n'
'b') == \
ImpalaShellClass.strip_leading_comment('/*delete*/\n'
'select c1 from\n'
'a\n'
'join /* +SHUFFLE */\n'
'b')
assert (None, 'select 1') == \
ImpalaShellClass.strip_leading_comment('select 1')
def test_malformed_query(self, vector):
"""Test the handling of malformed query without closing quotation"""
shell = ImpalaShell(vector)
query = "with v as (select 1) \nselect foo('\\\\'), ('bar \n;"
shell.send_cmd(query)
result = shell.get_result()
assert "ERROR: ParseException: Unmatched string literal" in result.stderr,\
result.stderr
def test_timezone_validation(self, vector):
"""Test that query option TIMEZONE is validated when executing a query.
Query options are not sent to the coordinator immediately, so the error checking
will only happen when running a query.
"""
p = ImpalaShell(vector)
p.send_cmd('set timezone=BLA;')
p.send_cmd('select 1;')
results = p.get_result()
assert "Fetched 1 row" not in results.stderr
# assert "ERROR: Errors parsing query options" in results.stderr, results.stderr
assert "Invalid timezone name 'BLA'" in results.stderr, results.stderr
def test_with_clause(self, vector):
# IMPALA-7939: Fix issue where CTE that contains "insert", "upsert", "update", or
# "delete" is categorized as a DML statement.
for keyword in ["insert", "upsert", "update", "delete", "\\'insert\\'",
"\\'upsert\\'", "\\'update\\'", "\\'delete\\'"]:
p = ImpalaShell(vector)
p.send_cmd("with foo as "
"(select * from functional.alltypestiny where string_col='%s') "
"select * from foo limit 1" % keyword)
result = p.get_result()
assert "Fetched 0 row" in result.stderr
def test_http_codes(self, vector):
"""Check that the shell prints a good message when using hs2-http protocol
and the http server returns a 503 error."""
protocol = vector.get_value("protocol")
if protocol != 'hs2-http':
pytest.skip()
# Start an http server that always returns 503.
HOST = "localhost"
PORT = get_unused_port()
httpd = None
http_server_thread = None
try:
httpd = SocketServer.TCPServer((HOST, PORT), UnavailableRequestHandler)
http_server_thread = threading.Thread(target=httpd.serve_forever)
http_server_thread.start()
# Check that we get a message about the 503 error when we try to connect.
shell_args = ["--protocol={0}".format(protocol), "-i{0}:{1}".format(HOST, PORT)]
shell_proc = pexpect.spawn(IMPALA_SHELL_EXECUTABLE, shell_args)
shell_proc.expect("HTTP code 503", timeout=10)
finally:
# Clean up.
if httpd is not None:
httpd.shutdown()
if http_server_thread is not None:
http_server_thread.join()
def run_impala_shell_interactive(vector, input_lines, shell_args=None,
wait_until_connected=True):
"""Runs a command in the Impala shell interactively."""
# if argument "input_lines" is a string, makes it into a list
if type(input_lines) is str:
input_lines = [input_lines]
# workaround to make Popen environment 'utf-8' compatible
# since piping defaults to ascii
my_env = os.environ
my_env['PYTHONIOENCODING'] = 'utf-8'
p = ImpalaShell(vector, args=shell_args, env=my_env,
wait_until_connected=wait_until_connected)
for line in input_lines:
p.send_cmd(line)
return p.get_result()
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum_ganja as electrum
from electrum_ganja.ganja import TYPE_ADDRESS
from electrum_ganja import WalletStorage, Wallet
from electrum_ganja_gui.kivy.i18n import _
from electrum_ganja.paymentrequest import InvoiceStore
from electrum_ganja.util import profiler, InvalidPassword
from electrum_ganja.plugins import run_hook
from electrum_ganja.util import format_satoshis, format_satoshis_plain
from electrum_ganja.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_ganja_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_ganja_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_ganja_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# Register a widget cache to keep memory usage down; timeout=0 keeps the
# cached data forever
Cache.register('electrum_ganja_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_ganja_gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum_ganja.util import base_units
class ElectrumGanjaWindow(App):
electrum_ganja_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
            from electrum_ganja import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == self.network.get_blockchain_name(b):
self.network.follow_chain(index)
#self.block
names = [self.network.blockchains[b].get_name() for b in chains]
if len(names) >1:
ChoiceDialog(_('Choose your chain'), names, '', cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_ganja_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_ganja_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_ganja_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'ganja':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def _get_bu(self):
return self.electrum_ganja_config.get('base_unit', 'mBTC')
def _set_bu(self, value):
assert value in base_units.keys()
self.electrum_ganja_config.set_key('base_unit', value, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''Current screen orientation of the app.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
    '''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
self.pause_time = 0
App.__init__(self)#, **kwargs)
title = _('Electrum-Ganja App')
self.electrum_ganja_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
        # Create triggers to throttle updates to at most twice per second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_ganja_config.get_fee_status()
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum_ganja.ganja import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('ganjacoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum_ganja.transaction import Transaction
from electrum_ganja.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum_ganja.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum_ganja.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False):
from .uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum_ganja.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for ganjacoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# load wallet
self.load_wallet_by_name(self.electrum_ganja_config.get_wallet_path())
# URI passed in config
uri = self.electrum_ganja_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, instance, wallet):
if wallet:
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
def load_wallet_by_name(self, path):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
else:
self.load_wallet(wallet)
else:
Logger.debug('Electrum-Ganja: Wallet not found. Launching install wizard')
storage = WalletStorage(path, manual_upgrades=True)
wizard = Factory.InstallWizard(self.electrum_ganja_config, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
def on_stop(self):
Logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of electrum. This function performs the basic
        tasks of setting up the ui.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_ganja_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_ganja_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_ganja_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_ganja_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum_ganja.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_checkpoint = chain.get_checkpoint()
self.blockchain_name = chain.get_name()
if self.network.interface:
self.server_host = self.network.interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging")
else:
status = ''
else:
status = _("Disconnected")
self.status = self.wallet.basename() + (' [size=15dp](%s)[/size]'%status if status else '')
# balance
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def get_max_amount(self):
inputs = self.wallet.get_spendable_coins(None, self.electrum_ganja_config)
if not inputs:
return ''
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [(TYPE_ADDRESS, addr, '!')]
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_ganja_config)
amount = tx.output_value()
return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum-Ganja', message,
app_icon=icon, app_name='Electrum-Ganja')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
        if self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_ganja_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_ganja_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_ganja_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_ganja_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
d = os.listdir(dirname)
name = 'default_wallet'
new_path = os.path.join(dirname, name)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
email.py
|
from flask_mail import Message
from app import mail
from flask import render_template
from app import app
from threading import Thread
def send_password_reset_email(user):
token = user.get_reset_password_token()
send_email('[InvestFly] Reset Your Password',
sender= 'investflycorporation@gmail.com',
recipients=[user.email],
text_body=render_template('email/reset_password.txt', user=user, token=token),
html_body=render_template('email/reset_password.html',user=user, token=token))
def send_user_verification_email(user):
token = user.get_verify_user_token()
send_email('[InvestFly] Welcome to InvestFly!',
sender= 'investflycorporation@gmail.com',
recipients=[user.email],
text_body=render_template('email/welcome.txt',user=user, token=token),
html_body=render_template('email/welcome.html',user=user, token=token))
def send_purchase_email(user, stock_data, bill, wallet):
send_email('[InvestFly]Purchase Confirmed!',
sender= 'investflycorporation@gmail.com',
recipients=[user.email],
text_body=render_template('email/conf_purchase.txt',user=user, stock=stock_data, bill=bill, wallet=wallet),
html_body=render_template('email/conf_purchase.html',user=user, stock=stock_data,bill=bill, wallet=wallet))
def send_listing_email(user, stock_data):
send_email('[InvestFly]Listing Confirmed!',
sender= 'investflycorporation@gmail.com',
recipients=[user.email],
text_body=render_template('email/conf_listing.txt',user=user,stock=stock_data),
html_body=render_template('email/conf_listing.html',user=user, stock=stock_data))
def send_sale_email(user, buyer, t_id, stock):
send_email('[InvestFly]Sale Confirmed!',
sender= 'investflycorporation@gmail.com',
recipients=[user.email],
text_body=render_template('email/conf_sale.txt',user=user, buyer=buyer, transaction_id=t_id , stock=stock),
html_body=render_template('email/conf_sale.html',user=user ,buyer=buyer, transaction_id=t_id, stock=stock))
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email, args=(app, msg)).start()
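# Illustrative usage sketch (not part of the original module): send_email() builds the
# Message and hands it to send_async_email() on a background Thread, so the caller
# returns without waiting for the SMTP round-trip. The recipient address and bodies
# below are hypothetical.
def _send_test_email_example():
    send_email('[InvestFly] Test message',
               sender='investflycorporation@gmail.com',
               recipients=['user@example.com'],
               text_body='This is a plain-text test body.',
               html_body='<p>This is an HTML test body.</p>')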
|
tiendita.py
|
from threading import Semaphore, Thread
from time import sleep
from random import randint
# Constants controlling how many customers may be in the store at once and the maximum number of orders each one can place
maxClient = 3
maxPed = 5
# Variable used to control when the little store closes
n = 0
# Synchronization mechanisms used in the solution
tiendita = Semaphore(maxClient) # Multiplex limiting the number of customers inside the store due to COVID-19
pedidos = Semaphore(0) # Mutex signalling that an order has been placed
despacho = Semaphore(1) # Mutex protecting the dispatch process
turnos = Semaphore(1) # Mutex managing the order turns
# List tracking which customer placed an order and which order number it is
turno = []
# Function run by the shopkeeper thread
def tendera():
global n
while(True):
        # The shopkeeper waits for an order to be placed
        pedidos.acquire()
        # Dispatch the order currently in turn
        turnos.acquire()
        # Take the current order out of the turn list
numCliente = turno.pop(0)
articulo = turno.pop(0)
print(' La tendera despacha el pedido %d del cliente %d' % (articulo,numCliente))
        # Wait for the next order
        turnos.release()
        despacho.release()
        # This conditional breaks the loop so the store can be closed
if(n == clientes-1):
break
# Function run by each customer thread
def cliente(numCliente):
global n
    # Randomly generate the number of items the customer is going to buy
    articulos = randint(1,maxPed)
    # A counter lets the customer keep placing orders until all of their items have been bought
    articulo = 1
    # Use the multiplex to control how many people are inside the store at once
    tiendita.acquire()
print('El cliente %d ha entrado a la tiendita y va a comprar %d cosas' % (numCliente, articulos))
    # While the customer still has items to buy, they keep placing orders
    while(articulo <= articulos):
        # Start the dispatch process for the order in turn
        despacho.acquire()
        # The customer places their order in turn
        turnos.acquire()
print(' El cliente %d hace su pedido numero %d' % (numCliente, articulo))
        # Store the current order and the customer it belongs to
turno.append(numCliente)
turno.append(articulo)
turnos.release()
        # The shopkeeper takes the order
        pedidos.release()
        # Move on to the next item to buy
articulo += 1
sleep(0.3)
    # Once the customer has no more items to buy, they leave the store
print(' El cliente %d sale de la tiendita' % (numCliente))
print(' Tendera: "Hasta luego vuelva pronto!!!"')
    # Leaving the store frees up a spot
    tiendita.release()
    # Increase the number of customers served during the day
    n = n+1
    # If the shopkeeper has served every customer of the day, the store closes
if(n == clientes):
print('')
print('')
print('8 de la noche se cierra la tienda')
print('')
print('')
print(' ---------')
print(' -CERRADO-')
print(' ---------')
# Number of customers that will visit the store
clientes = int(input('¿Cuántos clientes se atenderan hoy?\n'))
# Start the shopkeeper thread and open the store
Thread(target = tendera).start()
print('')
print('')
print('9 de la mañana se abre la tienda')
print('')
print('')
print(' ---------')
print(' -ABIERTO-')
print(' ---------')
print('')
print('')
# Start one thread per customer that will visit the store
for i in range(1,clientes+1):
Thread(target = cliente, args = [i]).start()
|
autodownloader.py
|
import tiktok
import database
from time import sleep
from threading import Thread
class AutoDownloader():
def __init__(self, window, downloadqueue):
self.window = window
self.autoDownloadQueue = downloadqueue
self.clipIndex = 0
self.auto = False
def startAutoMode(self):
self.auto = True
self.findClips()
def startDownloading(self):
self.downloadClips()
def startFinding(self):
self.findClips()
def stop(self):
tiktok.forceStop = True
def findClips(self):
if self.clipIndex == 0:
self.window.start_clip_search.emit()
if not self.clipIndex == len(self.autoDownloadQueue):
Thread(target=tiktok.getAllClips, args=(self.autoDownloadQueue[self.clipIndex], int(self.window.bulkFindAmount.text()), self.window)).start()
self.clipIndex += 1
else:
self.clipIndex = 0
self.window.end_find_search.emit()
if self.auto:
self.downloadClips()
def downloadClips(self):
if self.clipIndex == 0:
self.window.start_download_search.emit()
if not self.clipIndex == len(self.autoDownloadQueue):
filter = self.autoDownloadQueue[self.clipIndex]
clips = database.getFoundClips(filter[0], int(self.window.bulkDownloadAmount.text()))
Thread(target=tiktok.autoDownloadClips, args=(filter[0], clips, self.window)).start()
self.clipIndex += 1
else:
self.clipIndex = 0
self.window.end_download_search.emit()
if self.auto:
self.findClips()
|
naming.py
|
"""
Name Server and helper functions.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import with_statement
import re, logging, socket, sys
from Pyro4 import constants, core, socketutil
from Pyro4.threadutil import RLock, Thread
from Pyro4.errors import PyroError, NamingError
import Pyro4
__all__=["locateNS", "resolve", "startNS"]
if sys.version_info>=(3, 0):
basestring=str
log=logging.getLogger("Pyro4.naming")
class NameServer(object):
"""Pyro name server. Provides a simple flat name space to map logical object names to Pyro URIs."""
def __init__(self):
self.namespace={}
self.lock=RLock()
def lookup(self, name):
"""Lookup the given name, returns an URI if found"""
try:
return core.URI(self.namespace[name])
except KeyError:
raise NamingError("unknown name: "+name)
def register(self, name, uri, safe=False):
"""Register a name with an URI. If safe is true, name cannot be registered twice.
The uri can be a string or an URI object."""
if isinstance(uri, core.URI):
uri=uri.asString()
elif not isinstance(uri, basestring):
raise TypeError("only URIs or strings can be registered")
else:
core.URI(uri) # check if uri is valid
if not isinstance(name, basestring):
raise TypeError("name must be a str")
if safe and name in self.namespace:
raise NamingError("name already registered: "+name)
with self.lock:
self.namespace[name]=uri
def remove(self, name=None, prefix=None, regex=None):
"""Remove a registration. returns the number of items removed."""
if name and name in self.namespace and name!=constants.NAMESERVER_NAME:
with self.lock:
del self.namespace[name]
return 1
if prefix:
with self.lock:
items=list(self.list(prefix=prefix).keys())
if constants.NAMESERVER_NAME in items:
items.remove(constants.NAMESERVER_NAME)
for item in items:
del self.namespace[item]
return len(items)
if regex:
with self.lock:
items=list(self.list(regex=regex).keys())
if constants.NAMESERVER_NAME in items:
items.remove(constants.NAMESERVER_NAME)
for item in items:
del self.namespace[item]
return len(items)
return 0
def list(self, prefix=None, regex=None):
"""Retrieve the registered items as a dictionary name-to-URI. The URIs
in the resulting dict are strings, not URI objects.
You can filter by prefix or by regex."""
with self.lock:
if prefix:
result={}
for name in self.namespace:
if name.startswith(prefix):
result[name]=self.namespace[name]
return result
elif regex:
result={}
try:
regex=re.compile(regex+"$") # add end of string marker
except re.error:
x=sys.exc_info()[1]
raise NamingError("invalid regex: "+str(x))
else:
for name in self.namespace:
if regex.match(name):
result[name]=self.namespace[name]
return result
else:
# just return (a copy of) everything
return self.namespace.copy()
def ping(self):
"""A simple test method to check if the name server is running correctly."""
pass
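# Illustrative sketch (not part of Pyro4): exercising the flat namespace kept by
# NameServer -- register a URI under a logical name, look it up, and filter the
# listing by prefix. The URI string below is a made-up example.
def _nameserver_usage_example():
    ns = NameServer()
    ns.register("example.greeting", "PYRO:obj_12345@localhost:9999")
    uri = ns.lookup("example.greeting")    # returns a core.URI instance
    return ns.list(prefix="example.")      # {'example.greeting': 'PYRO:obj_12345@localhost:9999'}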
class NameServerDaemon(core.Daemon):
"""Daemon that contains the Name Server."""
def __init__(self, host=None, port=None, unixsocket=None, nathost=None, natport=None):
if Pyro4.config.DOTTEDNAMES:
raise PyroError("Name server won't start with DOTTEDNAMES enabled because of security reasons")
if host is None:
host=Pyro4.config.HOST
if port is None:
port=Pyro4.config.NS_PORT
if nathost is None:
nathost=Pyro4.config.NATHOST
if natport is None:
natport=Pyro4.config.NATPORT or None
super(NameServerDaemon, self).__init__(host, port, unixsocket, nathost=nathost, natport=natport)
self.nameserver=NameServer()
self.register(self.nameserver, constants.NAMESERVER_NAME)
self.nameserver.register(constants.NAMESERVER_NAME, self.uriFor(self.nameserver))
log.info("nameserver daemon created")
def close(self):
super(NameServerDaemon, self).close()
self.nameserver=None
def __enter__(self):
if not self.nameserver:
raise PyroError("cannot reuse this object")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.nameserver=None
return super(NameServerDaemon, self).__exit__(exc_type, exc_value, traceback)
class BroadcastServer(object):
REQUEST_NSURI = "GET_NSURI" if sys.platform=="cli" else b"GET_NSURI"
def __init__(self, nsUri, bchost=None, bcport=None):
self.nsUri=nsUri
if bcport is None:
bcport=Pyro4.config.NS_BCPORT
if bchost is None:
bchost=Pyro4.config.NS_BCHOST
if ":" in nsUri.host: # ipv6
bchost = bchost or "::"
self.sock=Pyro4.socketutil.createBroadcastSocket((bchost, bcport, 0, 0), reuseaddr=Pyro4.config.SOCK_REUSE, timeout=2.0)
else:
self.sock=Pyro4.socketutil.createBroadcastSocket((bchost, bcport), reuseaddr=Pyro4.config.SOCK_REUSE, timeout=2.0)
self._sockaddr=self.sock.getsockname()
bchost=bchost or self._sockaddr[0]
bcport=bcport or self._sockaddr[1]
if ":" in bchost: # ipv6
self.locationStr="[%s]:%d" % (bchost, bcport)
else:
self.locationStr="%s:%d" % (bchost, bcport)
log.info("ns broadcast server created on %s", self.locationStr)
self.running=True
def close(self):
log.debug("ns broadcast server closing")
self.running=False
try:
self.sock.shutdown(socket.SHUT_RDWR)
except (OSError, socket.error):
pass
self.sock.close()
def getPort(self):
return self.sock.getsockname()[1]
def fileno(self):
return self.sock.fileno()
def runInThread(self):
"""Run the broadcast server loop in its own thread. This is mainly for Jython,
which has problems with multiplexing it using select() with the Name server itself."""
thread=Thread(target=self.__requestLoop)
thread.setDaemon(True)
thread.start()
log.debug("broadcast server loop running in own thread")
def __requestLoop(self):
while self.running:
self.processRequest()
log.debug("broadcast server loop terminating")
def processRequest(self):
try:
data, addr=self.sock.recvfrom(100)
if data==self.REQUEST_NSURI:
responsedata=core.URI(self.nsUri)
if responsedata.host=="0.0.0.0":
                    # replace the INADDR_ANY address with the interface IP address that connects to the requesting client
try:
interface_ip=socketutil.getInterfaceAddress(addr[0])
responsedata.host=interface_ip
except socket.error:
pass
log.debug("responding to broadcast request from %s: interface %s", addr[0], responsedata.host)
responsedata = str(responsedata).encode("iso-8859-1")
self.sock.sendto(responsedata, 0, addr)
except socket.error:
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def startNSloop(host=None, port=None, enableBroadcast=True, bchost=None, bcport=None, unixsocket=None, nathost=None, natport=None):
"""utility function that starts a new Name server and enters its requestloop."""
daemon=NameServerDaemon(host, port, unixsocket, nathost=nathost, natport=natport)
nsUri=daemon.uriFor(daemon.nameserver)
internalUri=daemon.uriFor(daemon.nameserver, nat=False)
bcserver=None
if unixsocket:
hostip="Unix domain socket"
else:
hostip=daemon.sock.getsockname()[0]
if hostip.startswith("127."):
print("Not starting broadcast server for localhost.")
log.info("Not starting NS broadcast server because NS is bound to localhost")
enableBroadcast=False
if enableBroadcast:
# Make sure to pass the internal uri to the broadcast responder.
# It is almost always useless to let it return the external uri,
# because external systems won't be able to talk to this thing anyway.
bcserver=BroadcastServer(internalUri, bchost, bcport)
print("Broadcast server running on %s" % bcserver.locationStr)
bcserver.runInThread()
print("NS running on %s (%s)" % (daemon.locationStr, hostip))
if daemon.natLocationStr:
print("internal URI = %s" % internalUri)
print("external URI = %s" % nsUri)
else:
print("URI = %s" % nsUri)
try:
daemon.requestLoop()
finally:
daemon.close()
if bcserver is not None:
bcserver.close()
print("NS shut down.")
def startNS(host=None, port=None, enableBroadcast=True, bchost=None, bcport=None, unixsocket=None, nathost=None, natport=None):
"""utility fuction to quickly get a Name server daemon to be used in your own event loops.
Returns (nameserverUri, nameserverDaemon, broadcastServer)."""
daemon=NameServerDaemon(host, port, unixsocket, nathost=nathost, natport=natport)
bcserver=None
nsUri=daemon.uriFor(daemon.nameserver)
if not unixsocket:
hostip=daemon.sock.getsockname()[0]
if hostip.startswith("127."):
# not starting broadcast server for localhost.
enableBroadcast=False
if enableBroadcast:
internalUri=daemon.uriFor(daemon.nameserver, nat=False)
bcserver=BroadcastServer(internalUri, bchost, bcport)
return nsUri, daemon, bcserver
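# Illustrative sketch (not part of Pyro4): embedding the name server in your own code
# via startNS(), which returns the pieces instead of blocking like startNSloop().
# Host and port here are hypothetical; port=0 picks a random free port.
def _embedded_nameserver_example():
    uri, ns_daemon, bc_server = startNS(host="localhost", port=0)
    try:
        print("name server reachable at %s" % uri)
        ns_daemon.requestLoop()    # or multiplex ns_daemon/bc_server in your own event loop
    finally:
        ns_daemon.close()
        if bc_server is not None:
            bc_server.close()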
def locateNS(host=None, port=None):
"""Get a proxy for a name server somewhere in the network."""
if host is None:
# first try localhost if we have a good chance of finding it there
if Pyro4.config.NS_HOST in ("localhost", "::1") or Pyro4.config.NS_HOST.startswith("127."):
host = Pyro4.config.NS_HOST
if ":" in host: # ipv6
host="[%s]" % host
uristring="PYRO:%s@%s:%d" % (constants.NAMESERVER_NAME, host, port or Pyro4.config.NS_PORT)
log.debug("locating the NS: %s", uristring)
proxy=core.Proxy(uristring)
try:
proxy.ping()
log.debug("located NS")
return proxy
except PyroError:
pass
# broadcast lookup
if not port:
port=Pyro4.config.NS_BCPORT
log.debug("broadcast locate")
sock=Pyro4.socketutil.createBroadcastSocket(reuseaddr=Pyro4.config.SOCK_REUSE, timeout=0.7)
for _ in range(3):
try:
for bcaddr in Pyro4.config.parseAddressesString(Pyro4.config.BROADCAST_ADDRS):
try:
sock.sendto(BroadcastServer.REQUEST_NSURI, 0, (bcaddr, port))
except socket.error:
x=sys.exc_info()[1]
err=getattr(x, "errno", x.args[0])
if err not in Pyro4.socketutil.ERRNO_EADDRNOTAVAIL: # yeah, windows likes to throw these...
                        if err not in Pyro4.socketutil.ERRNO_EADDRINUSE: # and jython likes to throw these...
raise
data, _=sock.recvfrom(100)
sock.close()
if sys.version_info>=(3, 0):
data=data.decode("iso-8859-1")
log.debug("located NS: %s", data)
return core.Proxy(data)
except socket.timeout:
continue
try:
sock.shutdown(socket.SHUT_RDWR)
except (OSError, socket.error):
pass
sock.close()
log.debug("broadcast locate failed, try direct connection on NS_HOST")
# broadcast failed, try PYRO directly on specific host
host=Pyro4.config.NS_HOST
port=Pyro4.config.NS_PORT
# pyro direct lookup
if not port:
port=Pyro4.config.NS_PORT
if ":" in host:
host = "[%s]" % host
if core.URI.isUnixsockLocation(host):
uristring="PYRO:%s@%s" % (constants.NAMESERVER_NAME, host)
else:
uristring="PYRO:%s@%s:%d" % (constants.NAMESERVER_NAME, host, port)
uri=core.URI(uristring)
log.debug("locating the NS: %s", uri)
proxy=core.Proxy(uri)
try:
proxy.ping()
log.debug("located NS")
return proxy
except PyroError as x:
e = NamingError("Failed to locate the nameserver")
e.__cause__=x
raise e
def resolve(uri):
"""Resolve a 'magic' uri (PYRONAME) into the direct PYRO uri."""
if isinstance(uri, basestring):
uri=core.URI(uri)
elif not isinstance(uri, core.URI):
raise TypeError("can only resolve Pyro URIs")
if uri.protocol=="PYRO":
return uri
log.debug("resolving %s", uri)
if uri.protocol=="PYRONAME":
nameserver=locateNS(uri.host, uri.port)
uri=nameserver.lookup(uri.object)
nameserver._pyroRelease()
return uri
else:
raise PyroError("invalid uri protocol")
def main(args):
from optparse import OptionParser
parser=OptionParser()
parser.add_option("-n", "--host", dest="host", help="hostname to bind server on")
parser.add_option("-p", "--port", dest="port", type="int", help="port to bind server on (0=random)")
parser.add_option("-u", "--unixsocket", help="Unix domain socket name to bind server on")
parser.add_option("", "--bchost", dest="bchost", help="hostname to bind broadcast server on (default is \"\")")
parser.add_option("", "--bcport", dest="bcport", type="int",
help="port to bind broadcast server on (0=random)")
parser.add_option("", "--nathost", dest="nathost", help="external hostname in case of NAT")
parser.add_option("", "--natport", dest="natport", type="int", help="external port in case of NAT")
parser.add_option("-x", "--nobc", dest="enablebc", action="store_false", default=True,
help="don't start a broadcast server")
options, args = parser.parse_args(args)
startNSloop(options.host, options.port, enableBroadcast=options.enablebc,
bchost=options.bchost, bcport=options.bcport, unixsocket=options.unixsocket,
nathost=options.nathost, natport=options.natport)
if __name__=="__main__":
main(sys.argv[1:])
|
_DesyTrackerRunControl.py
|
import threading
import time
import click
import pyrogue
class DesyTrackerRunControl(pyrogue.RunControl):
def __init__(self, **kwargs):
rates = {1:'1 Hz', 0:'Auto'}
states = {0: 'Stopped', 1: 'Running', 2: 'Calibration'}
pyrogue.RunControl.__init__(self, rates=rates, states=states, **kwargs)
# These specify the parameters of a run
self.add(pyrogue.LocalVariable(
name = 'CalMeanCount',
description = 'Set number of iterations for mean fitting',
value = 100))
self.add(pyrogue.LocalVariable(
name = 'CalDacMin',
description = 'Min DAC value for calibration',
value = 0))
self.add(pyrogue.LocalVariable(
name = 'CalDacMax',
description = 'Max DAC value for calibration',
value = 255))
self.add(pyrogue.LocalVariable(
name = 'CalDacStep',
description = "DAC increment value for calibration",
value = 1))
self.add(pyrogue.LocalVariable(
name = 'CalDacCount',
description = "Number of iterations to take at each dac value",
value = 1))
self.add(pyrogue.LocalVariable(
name = 'CalChanMin',
description = 'Starting calibration channel',
value = 0))
self.add(pyrogue.LocalVariable(
name = 'CalChanMax',
description = 'Last calibration channel',
value = 1023))
# These are updated during the run
self.add(pyrogue.LocalVariable(
name = 'CalState',
disp = ['Idle', 'Baseline', 'Inject'],
value = 'Idle'))
self.add(pyrogue.LocalVariable(
name = 'CalChannel',
value = 0))
self.add(pyrogue.LocalVariable(
name = 'CalDac',
value = 0))
self.add(pyrogue.LocalVariable(
name = 'TimeoutWait',
value = 0.2,
units = 'Seconds'))
self.add(pyrogue.LocalVariable(
name = 'MaxRunCount',
value = 2**31-1))
@pyrogue.expose
def waitStopped(self):
self._thread.join()
def _setRunState(self,value,changed):
"""
Set run state. Reimplement in sub-class.
Enum of run states can also be overriden.
Underlying run control must update runCount variable.
"""
if changed:
# First stop old threads to avoid overlapping runs
# but not if we are calling from the running thread
if self._thread is not None and self._thread != threading.current_thread():
print('Join')
self._thread.join()
                self._thread = None
#self.root.ReadAll()
print('Stopped')
if self.runState.valueDisp() == 'Running':
print("Starting run thread")
self._thread = threading.Thread(target=self._run)
self._thread.start()
elif self.runState.valueDisp() == 'Calibration':
self._thread = threading.Thread(target=self._calibrate)
self._thread.start()
def __triggerAndWait(self):
self.root.waitOnUpdate()
self.root.DesyTracker.EthAcquire()
if self.runRate.valueDisp() == 'Auto':
runCount = self.runCount.value() +1
frameCount = self.root.DataWriter.getDataChannel().getFrameCount()
#print(f'Current count is: {current}. Waiting for: {waitfor}')
start = time.time()
if not self.root.DataWriter.getDataChannel().waitFrameCount(self.runCount.value()+1, int(self.TimeoutWait.value()*1000000)):
end = time.time()
print(f'Timed out waiting for data after {end-start} seconds')
print(f'Current frame count was: {frameCount}. Waiting for: {runCount}')
frameCount = self.root.DataWriter.getDataChannel().getFrameCount()
print(f'Frame count now: {frameCount}')
print('Waiting again')
start = time.time()
if not self.root.DataWriter.getDataChannel().waitFrameCount(self.runCount.value()+1, int(self.TimeoutWait.value()*1000000)):
print('Timed out again')
return False
else:
print(f'Got it this time in {time.time()-start} seconds')
else:
delay = 1.0 / self.runRate.value()
time.sleep(delay)
self.runCount.set(self.runCount.value() + 1)
return True
def __prestart(self):
print('Prestart: Resetting run count')
self.runCount.set(0)
self.root.DataWriter.getDataChannel().setFrameCount(0)
print('Prestart: Resetting Counters')
self.root.CountReset()
time.sleep(.2)
print('Prestart: Reading system state')
self.root.ReadAll()
time.sleep(.2)
print('Prestart: Starting Run')
self.root.DesyTracker.KpixDaqCore.AcquisitionControl.Running.set(True)
time.sleep(.2)
def __endRun(self):
print('')
print('Stopping Run')
self.root.DesyTracker.KpixDaqCore.AcquisitionControl.Running.set(False)
def _run(self):
self.__prestart()
# This will be ignored if configured for external start signal
self.root.DesyTracker.EthStart()
time.sleep(.2)
mode = self.root.DesyTracker.KpixDaqCore.AcquisitionControl.ExtAcquisitionSrc.valueDisp()
with click.progressbar(
iterable = range(self.MaxRunCount.value()),
show_pos = True,
label = click.style('Running ', fg='green')) as bar:
lastFrameCount = 0
while self.runState.valueDisp() == 'Running' and bar.finished is False:
if mode == 'EthAcquire':
self.__triggerAndWait()
bar.update(1)
else:
newFrameCount = self.root.DataWriter.getDataChannel().getFrameCount()
newFrames = newFrameCount-lastFrameCount
lastFrameCount = newFrameCount
bar.update(newFrames)
self.runCount.set(self.runCount.value() + newFrames)
time.sleep(.1)
print('_run Exiting')
self.__endRun()
if self.runState.valueDisp() != 'Stopped':
self.runState.setDisp('Stopped')
def _calibrate(self):
# Latch all of the run settings so they can't be changed mid-run
meanCount = self.CalMeanCount.value()
dacMin = self.CalDacMin.value()
dacMax = self.CalDacMax.value()
dacStep = self.CalDacStep.value()
dacCount = self.CalDacCount.value()
firstChan = self.CalChanMin.value()
lastChan = self.CalChanMax.value()
# Configure firmware for calibration
acqCtrl = self.root.DesyTracker.KpixDaqCore.AcquisitionControl
acqCtrl.ExtTrigSrc.setDisp('Disabled', write=True)
acqCtrl.ExtTimestampSrc.setDisp('Disabled', write=True)
acqCtrl.ExtAcquisitionSrc.setDisp('EthAcquire', write=True)
acqCtrl.ExtStartSrc.setDisp('EthStart', write=True)
acqCtrl.Calibrate.set(True, write=True)
self.runRate.setDisp('Auto')
self.root.ReadAll()
# Put asics in calibration mode
kpixAsics = [self.root.DesyTracker.KpixDaqCore.KpixAsicArray.KpixAsic[i] for i in range(24)]
kpixAsics = [kpix for kpix in kpixAsics if kpix.enable.get() is True] #small speed hack maybe
for kpix in kpixAsics:
kpix.setCalibrationMode()
# Restart the run count
self.__prestart()
self.root.DesyTracker.EthStart()
time.sleep(1)
# First do baselines
self.CalState.set('Baseline')
with click.progressbar(
iterable= range(meanCount),
show_pos = True,
label = click.style('Running baseline: ', fg='green')) as bar:
for i in bar:
if self.runState.valueDisp() == 'Calibration':
self.__triggerAndWait()
else:
self.__endRun()
return
dac = 0
channel = 0
chanSweep = range(firstChan, lastChan+1)
chanLoops = len(list(chanSweep))
dacSweep = range(dacMin, dacMax+1, dacStep)
dacLoops = len(list(dacSweep))
totalLoops = chanLoops * dacLoops
def getDacChan(item):
return f'Channel: {channel}, DAC: {dac}'
# Calibration
self.CalState.set('Inject')
with click.progressbar(
length = totalLoops,
show_pos = True,
item_show_func=getDacChan,
label = click.style('Running Injection: ', fg='green')) as bar:
for channel in chanSweep:
for dac in dacSweep:
bar.update(1)
with self.root.updateGroup():
# Set these to log in event stream
self.CalChannel.set(channel)
self.CalDac.set(dac)
# Configure each kpix for channel and dac
for kpix in kpixAsics:
# This occasionally fails so retry 10 times
for retry in range(10):
try:
#start = time.time()
kpix.setCalibration(channel, dac)
#print(f'Set new kpix settings in {time.time()-start} seconds')
break
except pyrogue.MemoryError as e:
if retry == 9:
raise e
else:
print(f'{kpix.path}.setCalibration({channel}, {dac}) failed. Retrying')
# Send acquire command and wait for response
for count in range(dacCount):
if self.runState.valueDisp() == 'Calibration':
self.__triggerAndWait()
else:
self.__endRun()
return
self.__endRun()
if self.runState.getDisp() != 'Stopped':
self.runState.setDisp('Stopped')
|
main.py
|
'''
Tools required for the experiment:
- OpenBCI
- Python script:
- Display images
- Capture signals via LSL
Procedure to follow:
- Start OpenBCI (to capture from the BCI)
- Start capturing with LSL ON
- Run the "main.py" script
- A signal-capture thread will be launched
- Once [ENTER] is pressed, the experiment begins.
NOTES:
- Each of the parameters must be supplied when starting the application.
- The default values of the input parameters may not be appropriate for the experiment to be run.
'''
import logging
from psychopy import logging as psycholog
from termcolor import colored
psycholog.console.setLevel(logging.CRITICAL)
import numpy as np
from random import randint
import pandas as pd
from psychopy import visual, core, event, monitors
from time import time
from pylsl import StreamInfo, StreamOutlet, local_clock
from glob import glob
from random import choice
import os
os.system('color')
from datetime import datetime
import argparse
import requests
import json
import shutil
from record import record_experiment
from threading import Thread
VERSION = '1.0beta'
API = 'WzOc1qxm5F5RRnGik4f6A2qQ3sogZyH8W5yFiNOURKw'
def printInfo(string):
print(colored('[!] ' + string, 'yellow'))
def printError(string):
print(colored('[!] ' + string, 'red'))
def printSuccess(string):
print(colored('[!] ' + string, 'green'))
def main():
'''
    [!] Main FUNCTION of the final degree project (Trabajo Fin de Grado)
    It is launched with the appropriate parameters and is in charge of carrying out the experiment.
    It handles building the EEG signal-capture thread as well as displaying the images on screen.
'''
banner = """
██████╗ ██████╗██╗ ████████╗███████╗ ██████╗
██╔══██╗██╔════╝██║ ╚══██╔══╝██╔════╝██╔════╝
██████╔╝██║ ██║█████╗██║ █████╗ ██║ ███╗
██╔══██╗██║ ██║╚════╝██║ ██╔══╝ ██║ ██║
██████╔╝╚██████╗██║ ██║ ██║ ╚██████╔╝
╚═════╝ ╚═════╝╚═╝ ╚═╝ ╚═╝ ╚═════╝
Enrique Tomás Martínez Beltrán
"""
print(colored(banner, 'yellow'))
parser = argparse.ArgumentParser(description='Obtención de señal EEG. Ejecución del experimento.', add_help=False)
parser.add_argument('-n', '--name', dest='name',
default="exp_{}".format(datetime.now().strftime("%d-%m-%Y-%H-%M-%S")),
help='Nombre del experimento')
parser.add_argument('-dim', '--dim', dest='size_monitor', default=[1920, 1080],
help='Dimensiones de la pantalla (default [1920,1080])')
parser.add_argument('-dm', '--distmon', dest='distance_monitor', default=67,
help='Distancia al monitor -en centímetros- (default 67)')
parser.add_argument('-m', '--mode', dest='mode', default=2,
help='Modo de ejecución del programa (default 2)')
# parser.add_argument('-t', '--time', dest='time', default=20,
# help='Tiempo de duración de la grabación')
parser.add_argument('-i', '--images', dest='images', default=30,
help='Número de imágenes distintas utilizadas en el experimento (default 30)')
parser.add_argument('-p', '--prob', dest='prob_target', default=0.1,
help='Probabilidad de aparición del Target en el experimento -tanto por 1- (default 0.1)')
parser.add_argument('-tt', dest='target_time', default=5,
help='Tiempo de visualización del target -en segundos- (default 5)')
parser.add_argument('-in', dest='image_interval', default=0.250,
help='Tiempo transcurrido entre imágenes -en segundos- (default 0.250)')
parser.add_argument('-io', dest='image_offset', default=0.150,
help='Tiempo offset de cada imagen -en segundos- (default 0.150)')
parser.add_argument('-j', dest='jitter', default=0.2,
help='Tiempo jitter variable al mostrar imagen -en segundos- (default 0.2)')
parser.add_argument('-v', '--version', action='version',
version='%(prog)s ' + VERSION, help="Versión del programa.")
parser.add_argument('-a', '--about', action='version',
version='Creado por Enrique Tomás Martínez Beltrán',
help="Información sobre el creador del programa.")
parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
help='Ayuda sobre la utilización del programa.')
args = parser.parse_args()
experiment = args.name
experiment = 'exp_23-07-2020-00-28-33'
# experiment_time = float(args.time)
mode = args.mode
total_img = int(args.images)
size_monitor = args.size_monitor
prob_target = float(args.prob_target)
distance_monitor = int(args.distance_monitor)
try:
if not os.path.isdir('experiments/' + experiment):
os.makedirs("experiments/{}/target".format(experiment))
os.makedirs("experiments/{}/no_target".format(experiment))
os.makedirs("experiments/{}/records".format(experiment))
if not os.listdir('experiments/{}/target'.format(experiment)) or not os.listdir(
'experiments/{}/no_target'.format(experiment)):
if (mode == 1):
printInfo("Modo 1 seleccionado (Modo manual)")
# The images are added manually; they are only obtained with the application
elif (mode == 2):
printInfo("Modo 2 seleccionado (Modo automático)")
printInfo("Descargando recursos...")
url_all = "https://api.unsplash.com/photos/random?count={}".format(total_img)
headers = {
'Authorization': 'Client-ID {}'.format(API)
}
response = requests.get(url_all, headers=headers, stream=True)
response_json = json.loads(response.text)
is_target = False
count = 0
for image in response_json:
url = image['urls']['raw']
response = requests.get(url + '&fm=jpg&fit=crop&w=1920&h=1080&q=80&fit=max', headers=headers,
stream=True)
if not is_target:
with open('experiments/{}/target/target.jpeg'.format(experiment), 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
is_target = True
continue
with open('experiments/{}/no_target/no_target_{}.jpeg'.format(experiment, count),
'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
del response
count = count + 1
image_interval = float(args.image_interval)
image_offset = float(args.image_offset)
jitter = float(args.jitter)
target_time = int(args.target_time)
# Launch the thread.
# It will wait until <enter> is pressed to start the synchronized capture.
# Approximate duration of the experiment.
# It will be used by the thread to record X seconds (always a bit more, to avoid cutting the experiment short)
experiment_time = total_img * image_interval * image_offset + total_img / 2
process = Thread(target=record_experiment, args=[experiment, experiment_time])
process.start()
print()
printInfo("Nombre del experimento: " + experiment)
printInfo("Dimensiones de la pantalla: ancho={} | alto={}".format(size_monitor[0], size_monitor[1]))
printInfo("Ruta del experimento: experiments/{}".format(experiment))
printInfo("Duración aproximada del experimento: " + str(experiment_time) + " s")
printInfo("Tiempo devisualización de Target pre-experimento: " + str(target_time * 1000) + " ms")
printInfo("Intervalo entre imágenes: " + str(image_interval * 1000) + " ms")
printInfo("Probabilidad de aparición Target: " + str(prob_target * 100) + " %")
if jitter:
printInfo("Jitter: " + str(jitter * 1000) + " ms")
try:
'''
Load the experiment's metadata.txt (if it exists)
'''
images = pd.read_csv('experiments/{}/metadata.txt'.format(experiment))
except:
printError("Metadata no encontrado, creando metadata aleatorio...")
'''
1 -> TARGET
0 -> NO TARGET
'''
# Array of total_img values in [0,1]; 1 -> TARGET occurs with probability prob_target (default 0.1)
img_types = np.random.binomial(1, prob_target, total_img)
# Adjustment to avoid 2 or more consecutive Targets
def check(lst):
caux = 0
last = lst[0]
for i, num in enumerate(lst[1:]):
if num == 1 and last == 1:
caux = caux + 1
lst[i] = 0
last = num
return caux
n = check(img_types)
for i in range(n):
while (True):
r = randint(0, len(img_types) - 1)  # randint's upper bound is inclusive
if img_types[r] != 1:
img_types[r] = 1
if (check(img_types)):
continue
else:
break
images = pd.DataFrame(dict(img_type=img_types,
timestamp=np.zeros(total_img)))
images.to_csv('experiments/{}/metadata.txt'.format(experiment), index=False)
print()
printInfo("DataFrame generado: ")
print()
print(images)
print()
mon = monitors.Monitor('asusmon')
mon.setDistance(distance_monitor)
window = visual.Window(size_monitor, monitor=mon, units="pix",
fullscr=False, color=[-1, -1, -1])
def cargarImagen(file):
nonlocal window
return visual.ImageStim(win=window, image=file, size=size_monitor)
targets = []
no_targets = []
t_argets = glob('experiments/{}/target/*.jpeg'.format(experiment))
for i in t_argets:
targets.append(cargarImagen(i))
not_argets = glob('experiments/{}/no_target/*.jpeg'.format(experiment))
for i in not_argets:
no_targets.append(cargarImagen(i))
text1 = visual.TextBox(window=window,
text='[Trabajo Fin de Grado - Enrique Tomás Martínez Beltrán]',
font_size=20,
font_color=[1, 1, 1],
textgrid_shape=[55, 2],
pos=(0.0, 0.6),
# border_color=[-1, -1, 1, 1],
# border_stroke_width=4,
# grid_color=[1, -1, -1, 0.5],
# grid_stroke_width=1
)
text2 = visual.TextBox(window=window,
text='Presiona <enter> para comenzar el experimento...',
font_size=20,
font_color=[1, 1, 1],
textgrid_shape=[48, 2],
pos=(0.0, 0.3),
# border_color=[-1, -1, 1, 1],
# border_stroke_width=4,
# grid_color=[1, -1, -1, 0.5],
# grid_stroke_width=1
)
text3 = visual.TextBox(window=window,
text='Fin del experimento...',
font_size=20,
font_color=[1, 1, 1],
textgrid_shape=[55, 2],
pos=(0.0, 0.6),
# border_color=[-1, -1, 1, 1],
# border_stroke_width=4,
# grid_color=[1, -1, -1, 0.5],
# grid_stroke_width=1
)
text4 = visual.TextBox(window=window,
text='¡Gracias por participar!',
font_size=20,
font_color=[1, 1, 1],
textgrid_shape=[48, 2],
pos=(0.0, 0.3),
# border_color=[-1, -1, 1, 1],
# border_stroke_width=4,
# grid_color=[1, -1, -1, 0.5],
# grid_stroke_width=1
)
logo_umu = visual.ImageStim(win=window, image="experiments/umu.jpg", units='pix')
logo_umu.pos += -0.3
logo_umu.size = [610, 140]
text1.draw()
text2.draw()
logo_umu.draw()
window.flip()
'''
If [ENTER] is pressed -> start the experiment.
Create the "Estimulo" stream so it can be detected by the capture thread.
'''
key = event.waitKeys()
while ('return' not in key):
key = event.waitKeys()
core.wait(3)
'''
Show the Target; the experiment will start after displaying the Target image for X seconds.
'''
target = choice(targets)
target.draw()
window.flip()
core.wait(target_time)
window.flip()
info = StreamInfo('Estimulo', 'Estimulo', 1, 0, 'int32', 'estimulo12310')
outlet = StreamOutlet(info)
nImage = 0
nTarget = 0
nNoTarget = 0
for i, trial in images.iterrows():
# Interval between images
core.wait(image_interval + np.random.rand() * jitter)
img_type = images['img_type'].iloc[i]
image = choice(targets if img_type == 1 else no_targets)
nImage = nImage + 1
if img_type == 1:
nTarget = nTarget + 1
else:
nNoTarget = nNoTarget + 1
image.draw()
timestamp = local_clock()
images.at[i, 'timestamp'] = timestamp
'''
If img_type = 1 -> Target -> Out=1
If img_type = 0 -> NoTarget -> Out=2
# "Out" is what gets written to the final csv
'''
outlet.push_sample([2 if img_type == 0 else 1], timestamp)
window.flip()
# window.update()
# offset
core.wait(image_offset)
# window.flip()
# if len(event.getKeys()) > 0 or (time() - start) > experiment_time:
# break
if 'escape' in event.getKeys():  # getKeys() returns a list of key names
printError('Cancelando experimento...')
break
event.clearEvents()
core.wait(1.5)
text3.draw()
text4.draw()
window.flip()
core.wait(5)
window.close()
process.join()
print()
printSuccess('---------------------------------------------')
printSuccess("Datos del experimento en: experiments/{}".format(experiment))
printSuccess('---------------------------------------------')
printSuccess('Experimento finalizado')
printSuccess("Número de imágenes mostradas: " + str(nImage))
printSuccess("Número de imágenes Target mostradas: " + str(nTarget))
printSuccess("Número de imágenes Non-Target mostradas: " + str(nNoTarget))
printSuccess('---------------------------------------------')
print()
printInfo("DataFrame final: ")
print()
print(images)
core.quit()
except KeyboardInterrupt:
printError('Cancelando experimento...')
window.close()
core.quit()
if __name__ == '__main__':
main()
|
sensor.py
|
# coding=utf-8
import subprocess
import re
import pdb
from util import *
from create_merge_topo import *
class sensor(object):
def __init__(self, id, nh, net, config_file, hosts=[], active = True, passive = True,
known_ips = [], max_fail = 5, simulation = False, readmit = True):
'''
:param id: the id of this sensor. It must be the ID of the node on which the sensor is placed.
:param nh: the number of hosts (monitors) to be used when they are chosen randomly to run iTop(if hosts=[])
:param net: Mininet network reference
:param config_file: Configuration file for the blockchain client run by this sensor
:param hosts: the list of hosts (monitors) to be used to run iTop
:param active: tell whether the sensor should have active capabilities (e.g. ping)
:param passive: tell whether the sensor should have passive capabilities (sniff traffic)
:param known_ips: list of known ips. Could even start a sensor with empty list
:param max_fail: maximum number of consecutive PING failures that is tolerated before declaring a node dead
:param simulation: True if the sensor has to be run on a Mininet host. The active sensor capabilities change.
:param readmit: If set to False, an ip will never be readmitted in the topology after it has been declared
'dead'. Set to False when the sensor is used in Mininet. Default: 'True'.
'''
self.__id = id
self.__active = active
self.__passive = passive
self.__known_ips = known_ips
self.__fail_count = {} # Counts the number of unsuccessful, consecutive ping replies for each known ip
for ip in known_ips:
self.__fail_count[ip] = 0
self.__max_fail = max_fail
self.__end = False
self.__simulation = simulation
self.__readmit = readmit
self.__dead = [] # Updated when found a dead node. Set empty soon after the dead node has been managed.
self.__banned = []
self.__new = [] # Updated when found a new node. Set empty soon after the new node has been managed.
self.__nh = nh
self.__hosts = hosts
self.__net = net
self.__alias = create_alias()
self.__c = configure_client(config_file)
#pdb.set_trace()
register_client(self.__c)
#TODO it would be better to have synchronized methods for write access to shared data structures
def start(self):
''' Starts the sensor.'''
if self.__active:
if self.__simulation:
threading.Thread(target=self.active_sensor_on_mininet).start()
else:
threading.Thread(target=self.active_sensor).start()
if self.__passive:
threading.Thread(target=self.passive_sensor).start()
threading.Thread(target=self.run).start()
threading.Thread(target=self.wait_user_input).start()
def run(self):
while not self.__end:
if self.__active:
self.check_dead_nodes()
if self.__passive:
self.check_new_nodes()
time.sleep(10)
def active_sensor(self):
'''Runs active sensor capabilities'''
while not self.__end:
for ip in self.__known_ips:
try:
p = subprocess.Popen(['ping', '-c', '1', ip], stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
result = p.returncode
if result != 0: # Not received correct reply
self.handle_unsuccessful_ping(ip)
else:
self.__fail_count[ip] = 0
except subprocess.CalledProcessError:
print 'Error with the ping subprocess'
time.sleep(10)
def active_sensor_on_mininet(self):
'''Runs active sensor capabilities on a Mininet host'''
while not self.__end:
for ip in self.__known_ips:
#pdb.set_trace()
s = self.__net[self.__id]
result = s.cmd('ping -c 1 ' + ip + ' | grep received | awk \'{print $4}\'')
print 'PING ' + s.IP() + ' -> ' + ip + ' : ' + result.rstrip() + '/1 packets received correctly\n'
try:
if int(result) != 1: # Not received the correct packet back
self.handle_unsuccessful_ping(ip)
else:
print self.__fail_count
self.__fail_count[ip] = 0
print '\nAfter Success, Fail count for ' + ip + '= ' + str(self.__fail_count[ip])
print self.__fail_count
except ValueError:
self.handle_unsuccessful_ping(ip)
time.sleep(10)
def handle_unsuccessful_ping(self, ip):
self.__fail_count[ip] = self.__fail_count[ip] + 1
print '\nFail count for ' + ip + ' = ' + str(self.__fail_count[ip])
if self.__fail_count[ip] > self.__max_fail:
self.__dead.append(ip)
if not self.__readmit:
self.__banned.append(ip)
print '\nBanned ' + ip
self.__known_ips.remove(ip)
del self.__fail_count[ip]
def passive_sensor(self):
'''Runs passive sensor capabilities'''
# Restrict to the subnet when the topology is simulated on Mininet
cmd = ['sudo', 'tcpdump', '-l', '-i', 'any', 'net', '192.168.0.0/16'] if self.__simulation \
else ['sudo', 'tcpdump', '-l', '-i', 'any']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
print '------------------- START SNIFFING ----------------------'
for row in iter(p.stdout.readline, b''):
src_ip = row.split()[2]
dst_ip = row.split()[4]
s_match = re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", src_ip)
d_match = re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", dst_ip)
if s_match:
self.handle_match(self.clean_ip(src_ip))
if d_match:
self.handle_match(self.clean_ip(dst_ip))
if self.__end:
p.terminate()
break
def handle_match(self, ip):
if ip not in self.__banned:
if ip not in self.__known_ips:
print '\nNew IP discovered: ' + ip
self.__new.append(ip)
self.__known_ips.append(ip)
self.__fail_count[ip] = 0
#TODO The passive sensor could be used to reset the fail count. It is sufficient to move the last line
# of the function after the second if (not inside it). For now this functionality is disabled, because
# Mininet has switches that send IP packets but do not respond to ping, and those definitely should not
# belong to the topology
def check_new_nodes(self):
'''Checks whether the passive sensor found traffic dealing with a new, unknown host.
In such a case, run a new instance of iTop and update the topology in the ledger.'''
if (len(self.__new)) != 0:
out = self.iTop()
topo = get_topo_from_json(out)
trans = get_transactions_from_topo(topo)
self.__c.send_transactions(trans)
self.__new = []
def check_dead_nodes(self):
'''Checks whether the active sensor discovered a dead node (ip address non responding to ping for more
than max times). In that case, tell the Blockchain servers that such a node no longer exists.'''
trans = []
for n in list(self.__dead):  # iterate over a copy, since the list is modified inside the loop
print '\nDead node: ' + n + '\n'
try:
tx = transaction(self.__alias[n], None, False)
trans.append(tx)
except KeyError:
print '\n' + n + ' does not belong to the topology\n' # Only because we are in a simulation
self.__dead.remove(n)
if len(trans) > 0:
self.__c.send_transactions(trans)
self.__dead = []
def clean_ip(self, raw_ip):
'Clean the ip. A slightly different cleaning is done based on whether the ip is source or destination.'
#bytes = raw_ip.split('.')
bytes = re.split('\.|:', raw_ip)
return bytes[0] + '.' + bytes[1] + '.' + bytes[2] + '.' + bytes[3]
def wait_user_input(self):
while not self.__end:
#TODO if received user input to stop, stop the sensor. Improve the stop interface
choice = raw_input("Type 'Q' to quit the sensor.\n")
if choice =='Q' or choice == 'q':
self.stop()
def iTop(self):
'''Runs iTop on the existing topology and returns the filename of the induced topology.'''
hosts = self.__hosts
if len(self.__hosts) == 0:
hosts = get_hosts(int(self.__nh))
(vtopo, traces) = create_virtual_topo_and_traces(self.__alias, hosts)
(M, C) = create_merge_options(vtopo, traces)
(M, mtopo) = create_merge_topology(M, vtopo, C)
#print_topo(mtopo)
out = write_topo_to_file(self.__id, mtopo, hosts)
return out
#TODO is stopping it this way correct? Consider whether shared variables need to be protected with locks
def stop(self):
self.__end = True
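# A minimal usage sketch (illustrative only; 'net' is assumed to be a running
# Mininet network object and 'client.conf' a valid blockchain client
# configuration file -- both are placeholders):
#
#   s = sensor(id='h1', nh=3, net=net, config_file='client.conf',
#              known_ips=['192.168.0.2'], simulation=True, readmit=False)
#   s.start()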
|
MultiprocessingDropbox.py
|
# Tai Sakuma <tai.sakuma@gmail.com>
from __future__ import print_function
import logging
import multiprocessing
import threading
from operator import itemgetter
from collections import deque
from ..progressbar import NullProgressMonitor
from .TaskPackage import TaskPackage
from .Worker import Worker
##__________________________________________________________________||
# https://docs.python.org/3/howto/logging-cookbook.html#logging-to-a-single-file-from-multiple-processes
def logger_thread(queue):
while True:
record = queue.get()
if record is None:
break
logger = logging.getLogger(record.name)
logger.handle(record)
##__________________________________________________________________||
class MultiprocessingDropbox(object):
def __init__(self, nprocesses=16, progressMonitor=None):
if nprocesses <= 0:
raise ValueError("nprocesses must be at least one: {} is given".format(nprocesses))
self.progressMonitor = NullProgressMonitor() if progressMonitor is None else progressMonitor
self.n_max_workers = nprocesses
self.workers = [ ]
self.task_queue = multiprocessing.JoinableQueue()
self.result_queue = multiprocessing.Queue()
self.logging_queue = multiprocessing.Queue()
self.lock = multiprocessing.Lock()
self.n_ongoing_tasks = 0
self.task_idx = -1 # so it starts from 0
def __repr__(self):
return '{}(progressMonitor={!r}, n_max_workers={!r}, n_ongoing_tasks={!r}, task_idx={!r})'.format(
self.__class__.__name__,
self.progressMonitor,
self.n_max_workers,
self.n_ongoing_tasks,
self.task_idx
)
def open(self):
if len(self.workers) >= self.n_max_workers:
# workers already created
return
# start logging listener
self.loggingListener = threading.Thread(
target=logger_thread, args=(self.logging_queue,)
)
self.loggingListener.start()
# start workers
for i in range(self.n_max_workers):
worker = Worker(
task_queue=self.task_queue,
result_queue=self.result_queue,
logging_queue=self.logging_queue,
progressReporter=self.progressMonitor.createReporter(),
lock=self.lock
)
worker.start()
self.workers.append(worker)
self.to_return = deque()
def put(self, package):
self.task_idx += 1
self.task_queue.put((self.task_idx, package))
self.n_ongoing_tasks += 1
return self.task_idx
def put_multiple(self, packages):
task_idxs = [ ]
for package in packages:
task_idxs.append(self.put(package))
return task_idxs
def poll(self):
"""Return pairs of task indices and results of finished tasks.
"""
messages = list(self.to_return) # a list of (task_idx, result)
self.to_return.clear()
messages.extend(self._receive_finished())
# sort in the order of task_idx
messages = sorted(messages, key=itemgetter(0))
return messages
def receive_one(self):
"""Return a pair of a package index and a result.
This method waits until a task finishes.
This method returns None if no task is running.
"""
if self.to_return:
return self.to_return.popleft()
if self.n_ongoing_tasks == 0:
return None
while not self.to_return:
self.to_return.extend(self._receive_finished())
return self.to_return.popleft()
def receive(self):
"""Return pairs of task indices and results.
This method waits until all tasks finish.
"""
messages = list(self.to_return) # a list of (task_idx, result)
self.to_return.clear()
while self.n_ongoing_tasks >= 1:
messages.extend(self._receive_finished())
# sort in the order of task_idx
messages = sorted(messages, key=itemgetter(0))
return messages
def _receive_finished(self):
messages = [ ] # a list of (task_idx, result)
while not self.result_queue.empty():
message = self.result_queue.get()
messages.append(message)
self.n_ongoing_tasks -= 1
return messages
def terminate(self):
for worker in self.workers:
worker.terminate()
# wait until all workers are terminated.
while any([w.is_alive() for w in self.workers]):
pass
self.workers = [ ]
def close(self):
# end workers
if self.workers:
for i in range(len(self.workers)):
self.task_queue.put(None)
self.task_queue.join()
self.workers = [ ]
# end logging listener
self.logging_queue.put(None)
self.loggingListener.join()
##__________________________________________________________________||
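# A minimal usage sketch (illustrative only; 'packages' is assumed to be a list
# of TaskPackage objects understood by Worker):
#
#   dropbox = MultiprocessingDropbox(nprocesses=4)
#   dropbox.open()
#   dropbox.put_multiple(packages)
#   results = dropbox.receive()   # [(task_idx, result), ...] sorted by task_idx
#   dropbox.close()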
|
test_lib.py
|
#!/usr/bin/env python
"""A library for tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import datetime
import doctest
import email
import functools
import itertools
import logging
import os
import shutil
import threading
import time
import unittest
from unittest import mock
from absl.testing import absltest
import pkg_resources
from grr_response_core import config
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_network as rdf_client_network
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_core.lib.util import cache
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import precondition
from grr_response_core.lib.util import temp
from grr_response_core.stats import stats_collector_instance
from grr_response_server import access_control
from grr_response_server import client_index
from grr_response_server import data_store
from grr_response_server import email_alerts
from grr_response_server import prometheus_stats_collector
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import testing_startup
FIXED_TIME = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration.From(
8, rdfvalue.DAYS)
TEST_CLIENT_ID = "C.1000000000000000"
class GRRBaseTest(absltest.TestCase):
"""This is the base class for all GRR tests."""
def __init__(self, methodName=None): # pylint: disable=g-bad-name
"""Hack around unittest's stupid constructor.
We sometimes need to instantiate the test suite without running any tests -
e.g. to start initialization or setUp() functions. The unittest constructor
requires to provide a valid method name.
Args:
methodName: The test method to run.
"""
super(GRRBaseTest, self).__init__(methodName=methodName or "__init__")
self.base_path = config.CONFIG["Test.data_dir"]
@classmethod
def setUpClass(cls):
super().setUpClass()
logging.disable(logging.CRITICAL)
@classmethod
def tearDownClass(cls):
logging.disable(logging.NOTSET)
super().tearDownClass()
def setUp(self):
super(GRRBaseTest, self).setUp()
test_user = u"test"
system_users_patcher = mock.patch.object(
access_control, "SYSTEM_USERS",
frozenset(itertools.chain(access_control.SYSTEM_USERS, [test_user])))
system_users_patcher.start()
self.addCleanup(system_users_patcher.stop)
self.token = access_control.ACLToken(
username=test_user, reason="Running tests")
self.temp_dir = temp.TempDirPath()
config.CONFIG.SetWriteBack(os.path.join(self.temp_dir, "writeback.yaml"))
self.addCleanup(lambda: shutil.rmtree(self.temp_dir, ignore_errors=True))
# Each datastore is wrapped with DatabaseValidationWrapper, so we have
# to access the delegate directly (assuming it's an InMemoryDB
# implementation).
data_store.REL_DB.delegate.ClearTestDB()
email_alerts.InitializeEmailAlerterOnce()
# Stub out the email function
self.emails_sent = []
def SendEmailStub(to_user, from_user, subject, message, **unused_kwargs):
self.emails_sent.append((to_user, from_user, subject, message))
self.mail_stubber = utils.MultiStubber(
(email_alerts.EMAIL_ALERTER, "SendEmail", SendEmailStub),
(email.utils, "make_msgid", lambda: "<message id stub>"))
self.mail_stubber.Start()
self.addCleanup(self.mail_stubber.Stop)
# We don't want to send actual email in our tests
self.smtp_patcher = mock.patch("smtplib.SMTP")
self.mock_smtp = self.smtp_patcher.start()
self.addCleanup(self.smtp_patcher.stop)
def DisabledSet(*unused_args, **unused_kw):
raise NotImplementedError(
"Usage of Set() is disabled, please use a configoverrider in tests.")
self.config_set_disable = utils.Stubber(config.CONFIG, "Set", DisabledSet)
self.config_set_disable.Start()
self.addCleanup(self.config_set_disable.Stop)
self._SetupFakeStatsContext()
# Turn off WithLimitedCallFrequency-based caching in tests. Tests that need
# to test caching behavior explicitly, should turn it on explicitly.
with_limited_call_frequency_stubber = utils.Stubber(
cache, "WITH_LIMITED_CALL_FREQUENCY_PASS_THROUGH", True)
with_limited_call_frequency_stubber.Start()
self.addCleanup(with_limited_call_frequency_stubber.Stop)
def _SetupFakeStatsContext(self):
"""Creates a stats context for running tests based on defined metrics."""
# Reset stats_collector_instance to None, then reinitialize it.
patcher = mock.patch.object(stats_collector_instance, "_stats_singleton",
None)
patcher.start()
self.addCleanup(patcher.stop)
stats_collector_instance.Set(
prometheus_stats_collector.PrometheusStatsCollector())
def SetupClient(self,
client_nr,
arch="x86_64",
fqdn=None,
labels=None,
last_boot_time=None,
install_time=None,
kernel="4.0.0",
os_version="buster/sid",
ping=None,
system="Linux",
users=None,
memory_size=None,
add_cert=True,
fleetspeak_enabled=False):
"""Prepares a test client mock to be used.
Args:
client_nr: int The GRR ID to be used. 0xABCD maps to C.100000000000abcd in
canonical representation.
arch: string
fqdn: string
labels: list of labels (strings)
last_boot_time: RDFDatetime
install_time: RDFDatetime
kernel: string
os_version: string
ping: RDFDatetime
system: string
users: list of rdf_client.User objects.
memory_size: bytes
add_cert: boolean
fleetspeak_enabled: boolean
Returns:
the client_id: string
"""
client = self._SetupTestClientObject(
client_nr,
add_cert=add_cert,
arch=arch,
fqdn=fqdn,
install_time=install_time,
labels=labels,
last_boot_time=last_boot_time,
kernel=kernel,
memory_size=memory_size,
os_version=os_version,
ping=ping or rdfvalue.RDFDatetime.Now(),
system=system,
users=users,
fleetspeak_enabled=fleetspeak_enabled)
return client.client_id
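# A minimal usage sketch inside a GRRBaseTest subclass (illustrative only):
#
#   client_id = self.SetupClient(0xABCD, system="Linux")
#   # client_id == "C.100000000000abcd"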
def SetupClients(self, nr_clients, *args, **kwargs):
"""Prepares nr_clients test client mocks to be used."""
return self.SetupClientsWithIndices(range(nr_clients), *args, **kwargs)
def SetupClientsWithIndices(self, indices, *args, **kwargs):
"""Sets up mock clients, one for each numerical index in 'indices'."""
return [self.SetupClient(i, *args, **kwargs) for i in indices]
def _TestClientInfo(self, labels=None):
res = rdf_client.ClientInformation(
client_name="GRR Monitor",
client_version=config.CONFIG["Source.version_numeric"],
build_time="1980-01-01")
if labels is None:
res.labels = ["label1", "label2"]
else:
res.labels = labels
return res
def _TestInterfaces(self, client_nr):
ip1 = rdf_client_network.NetworkAddress()
ip1.human_readable_address = "192.168.0.%d" % client_nr
ip2 = rdf_client_network.NetworkAddress()
ip2.human_readable_address = "2001:abcd::%x" % client_nr
mac1 = rdf_client_network.MacAddress.FromHumanReadableAddress(
"aabbccddee%02x" % client_nr)
mac2 = rdf_client_network.MacAddress.FromHumanReadableAddress(
"bbccddeeff%02x" % client_nr)
return [
rdf_client_network.Interface(ifname="if0", addresses=[ip1, ip2]),
rdf_client_network.Interface(ifname="if1", mac_address=mac1),
rdf_client_network.Interface(ifname="if2", mac_address=mac2),
]
def _SetupTestClientObject(self,
client_nr,
add_cert=True,
arch="x86_64",
fqdn=None,
install_time=None,
last_boot_time=None,
kernel="4.0.0",
memory_size=None,
os_version="buster/sid",
ping=None,
system="Linux",
users=None,
labels=None,
fleetspeak_enabled=False):
"""Prepares a test client object."""
client_id = u"C.1%015x" % client_nr
client = rdf_objects.ClientSnapshot(client_id=client_id)
client.startup_info.client_info = self._TestClientInfo(labels=labels)
if last_boot_time is not None:
client.startup_info.boot_time = last_boot_time
client.knowledge_base.fqdn = fqdn or "Host-%x.example.com" % client_nr
client.knowledge_base.os = system
client.knowledge_base.users = users or [
rdf_client.User(username=u"user1"),
rdf_client.User(username=u"user2"),
]
client.os_version = os_version
client.arch = arch
client.kernel = kernel
client.interfaces = self._TestInterfaces(client_nr)
client.install_time = install_time
client.hardware_info = rdf_client.HardwareInfo(
system_manufacturer="System-Manufacturer-%x" % client_nr,
bios_version="Bios-Version-%x" % client_nr)
if memory_size is not None:
client.memory_size = memory_size
ping = ping or rdfvalue.RDFDatetime.Now()
if add_cert:
cert = self.ClientCertFromPrivateKey(config.CONFIG["Client.private_key"])
else:
cert = None
data_store.REL_DB.WriteClientMetadata(
client_id,
last_ping=ping,
certificate=cert,
fleetspeak_enabled=fleetspeak_enabled)
data_store.REL_DB.WriteClientSnapshot(client)
client_index.ClientIndex().AddClient(client)
if labels is not None:
data_store.REL_DB.AddClientLabels(client_id, u"GRR", labels)
client_index.ClientIndex().AddClientLabels(client_id, labels)
return client
def AddClientLabel(self, client_id, owner, name):
data_store.REL_DB.AddClientLabels(client_id, owner, [name])
client_index.ClientIndex().AddClientLabels(client_id, [name])
def ClientCertFromPrivateKey(self, private_key):
common_name = rdf_client.ClientURN.FromPrivateKey(private_key)
csr = rdf_crypto.CertificateSigningRequest(
common_name=common_name, private_key=private_key)
return rdf_crypto.RDFX509Cert.ClientCertFromCSR(csr)
def GenerateToken(self, username, reason):
return access_control.ACLToken(username=username, reason=reason)
class ConfigOverrider(object):
"""A context to temporarily change config options."""
def __init__(self, overrides):
self._overrides = overrides
self._old_cache = None
self._old_global_override = None
def __enter__(self):
self.Start()
def Start(self):
self._old_cache = config.CONFIG.cache
config.CONFIG.cache = dict()
self._old_global_override = config.CONFIG.global_override
config.CONFIG.global_override = self._old_global_override.copy()
config.CONFIG.global_override.update(self._overrides)
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Stop()
def Stop(self):
config.CONFIG.cache = self._old_cache
config.CONFIG.global_override = self._old_global_override
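# A minimal usage sketch (illustrative only; the overridden key is just an
# example of a config option used elsewhere in this file):
#
#   with ConfigOverrider({"Test.data_dir": "/tmp/grr_test_data"}):
#     ...  # code under test sees the overridden value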
class PreserveConfig(object):
def __enter__(self):
self.Start()
def Start(self):
self.old_config = config.CONFIG
config.CONFIG = self.old_config.MakeNewConfig()
config.CONFIG.initialized = self.old_config.initialized
config.CONFIG.SetWriteBack(self.old_config.writeback.filename)
config.CONFIG.raw_data = self.old_config.raw_data.copy()
config.CONFIG.writeback_data = self.old_config.writeback_data.copy()
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Stop()
def Stop(self):
config.CONFIG = self.old_config
class FakeTime(object):
"""A context manager for faking time."""
def __init__(self, fake_time, increment=0):
if isinstance(fake_time, rdfvalue.RDFDatetime):
self.time = fake_time.AsMicrosecondsSinceEpoch() / 1e6
elif isinstance(fake_time, str):
self.time = rdfvalue.RDFDatetime.FromHumanReadable(
fake_time).AsMicrosecondsSinceEpoch() / 1e6
else:
self.time = fake_time
self.increment = increment
def __enter__(self):
self.old_time = time.time
def Time():
self.time += self.increment
return self.time
time.time = Time
self.old_strftime = time.strftime
def Strftime(form, t=time.localtime(Time())):
return self.old_strftime(form, t)
time.strftime = Strftime
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
time.time = self.old_time
time.strftime = self.old_strftime
# TODO(hanuszczak): `FakeTime` and `FakeTimeline` serve a similar purpose,
# although `FakeTimeline` (arguably) allows to write more sophisticated tests.
# Therefore, it should be possible to rewrite existing test code to use
# `FakeTimeline` instead of `FakeTime`. Once done, `FakeTime` should be removed.
# TODO(hanuszczak): Write proper documentation.
class FakeTimeline(object):
"""A context manager for testing time-aware code.
This utility class overrides `time.sleep` and `time.time` methods so that the
code that uses them can be tested. It is assumed that the code that needs to
be tested runs on some thread. Using `Run` method one can simulate running
this thread for certain amount of time but without spending that time waiting
for anything.
While internally the simulation actually executes the code on a separate
thread, it can be thought as if the code was executed synchronously on the
current thread. However, the time flow is "immediate" and `time.sleep` calls
do not really block.
For example, it is possible to instantly simulate running a thread for half an
hour (assuming that most of that time the thread would spend sleeping).
In order to reliably test the flow of time-aware code, it is assumed that only
the `time.sleep` function causes time to flow. In other words, every
non-`sleep` line of code is assumed to execute instantly. In particular, if
there is an infinite loop without any `time.sleep` calls, running the
simulation for any number of seconds will block indefinitely. This is not a
big issue since this class is intended to be used only for testing purposes.
"""
class _WorkerThreadExit(Exception): # pylint: disable=g-bad-exception-name
pass
def __init__(self, thread, now=None):
"""Initializes the timeline.
Args:
thread: A thread to perform controlled execution on.
now: An `RDFDatetime` object representing starting point of the timeline.
If no value is provided, current time is used.
Raises:
TypeError: If `thread` is not an instance of `Thread` or if `now` is not
an instance of `RDFDatetime`.
"""
if not isinstance(thread, threading.Thread):
raise TypeError("`thread` is not an instance of `threading.Thread`")
if now is not None and not isinstance(now, rdfvalue.RDFDatetime):
raise TypeError("`now` is not an instance of `rdfvalue.RDFDatetime`")
self._thread = thread
self._owner_thread_turn = threading.Event()
self._worker_thread_turn = threading.Event()
# Fake, "current" number of seconds since epoch.
self._time = (now or rdfvalue.RDFDatetime.Now()).AsSecondsSinceEpoch()
# Number of seconds that the worker thread can sleep.
self._budget = 0
self._worker_thread = None
self._worker_thread_done = False
self._worker_thread_exception = None
def Run(self, duration):
"""Simulated running the underlying thread for the specified duration.
Args:
duration: A `Duration` object describing for how long to simulate the thread.
Raises:
TypeError: If `duration` is not an instance of `rdfvalue.Duration`.
AssertionError: If this method is called without automatic context.
"""
precondition.AssertType(duration, rdfvalue.Duration)
if self._worker_thread is None:
raise AssertionError("Worker thread hasn't been started (method was "
"probably called without context initialization)")
if self._worker_thread_done:
return
self._budget += duration.ToInt(rdfvalue.SECONDS)
self._original_time = time.time
self._original_sleep = time.sleep
with utils.Stubber(time, "time", self._Time),\
utils.Stubber(time, "sleep", self._Sleep):
self._owner_thread_turn.clear()
self._worker_thread_turn.set()
self._owner_thread_turn.wait()
if self._worker_thread_exception is not None:
# TODO(hanuszczak): Investigate why this linter warning is triggered.
raise self._worker_thread_exception # pylint: disable=raising-bad-type
def __enter__(self):
if self._worker_thread is not None:
raise AssertionError("Worker thread has been already started, context "
"cannot be reused.")
def Worker():
self._worker_thread_turn.wait()
try:
if self._worker_thread_done:
raise FakeTimeline._WorkerThreadExit
self._thread.run()
except FakeTimeline._WorkerThreadExit:
pass
except Exception as exception: # pylint: disable=broad-except
self._worker_thread_exception = exception
self._worker_thread_done = True
self._owner_thread_turn.set()
self._worker_thread = threading.Thread(
target=Worker, name="FakeTimelineThread")
self._worker_thread.start()
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
del exc_type, exc_value, exc_traceback # Unused.
self._worker_thread_done = True
self._worker_thread_turn.set()
self._worker_thread.join(5.0)
if self._worker_thread.is_alive():
raise RuntimeError("FakeTimelineThread did not complete.")
def _Sleep(self, seconds):
if threading.current_thread() is not self._worker_thread:
return self._original_sleep(seconds)
self._time += seconds
self._budget -= seconds
while self._budget < 0:
self._worker_thread_turn.clear()
self._owner_thread_turn.set()
self._worker_thread_turn.wait()
if self._worker_thread_done:
raise FakeTimeline._WorkerThreadExit()
def _Time(self):
if threading.current_thread() is not self._worker_thread:
return self._original_time()
return self._time
class FakeDateTimeUTC(object):
"""A context manager for faking time when using datetime.utcnow."""
def __init__(self, fake_time, increment=0):
self.time = fake_time
self.increment = increment
def __enter__(self):
self.old_datetime = datetime.datetime
class FakeDateTime(object):
def __init__(self, time_val, increment, orig_datetime):
self.time = time_val
self.increment = increment
self.orig_datetime = orig_datetime
def __call__(self, *args, **kw):
return self.orig_datetime(*args, **kw)
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
return getattr(self.orig_datetime, name)
def utcnow(self): # pylint: disable=invalid-name
self.time += self.increment
return self.orig_datetime.utcfromtimestamp(self.time)
datetime.datetime = FakeDateTime(self.time, self.increment,
self.old_datetime)
def __exit__(self, unused_type, unused_value, unused_traceback):
datetime.datetime = self.old_datetime
class Instrument(object):
"""A helper to instrument a function call.
Stores a copy of all function call args locally for later inspection.
"""
def __init__(self, module, target_name):
self.old_target = getattr(module, target_name)
@functools.wraps(self.old_target)
def Wrapper(*args, **kwargs):
self.args.append(args)
self.kwargs.append(kwargs)
self.call_count += 1
return self.old_target(*args, **kwargs)
self.stubber = utils.Stubber(module, target_name, Wrapper)
self.args = []
self.kwargs = []
self.call_count = 0
def __enter__(self):
self.stubber.__enter__()
return self
def __exit__(self, t, value, tb):
return self.stubber.__exit__(t, value, tb)
def RequiresPackage(package_name):
"""Skip this test if required package isn't present.
Note this will only work in opensource testing where we actually have
packages.
Args:
package_name: string
Returns:
Decorator function
"""
def Decorator(test_function):
@functools.wraps(test_function)
def Wrapper(*args, **kwargs):
try:
pkg_resources.get_distribution(package_name)
except pkg_resources.DistributionNotFound:
raise unittest.SkipTest("Skipping, package %s not installed" %
package_name)
return test_function(*args, **kwargs)
return Wrapper
return Decorator
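# A minimal usage sketch (illustrative only; "pandas" is just an example
# package name):
#
#   @RequiresPackage("pandas")
#   def testSomethingThatNeedsPandas(self):
#     ...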
class SuppressLogs(object):
"""A context manager for suppressing logging."""
def __enter__(self):
self.old_error = logging.error
self.old_warning = logging.warning
self.old_info = logging.info
self.old_debug = logging.debug
logging.error = lambda *args, **kw: None
logging.warning = lambda *args, **kw: None
logging.info = lambda *args, **kw: None
logging.debug = lambda *args, **kw: None
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
logging.error = self.old_error
logging.warning = self.old_warning
logging.info = self.old_info
logging.debug = self.old_debug
# TODO(user): It would be nice if all doctested functions (or even examples)
# had their own method in the TestCase. This allows faster developer cycles,
# because the developer sees all failures instead of only the first one. Also,
# it makes it easier to see if a doctest has been added for a new docstring.
class DocTest(absltest.TestCase):
"""A TestCase that tests examples in docstrings using doctest.
Attributes:
module: A reference to the module to be tested.
"""
module = None
def testDocStrings(self):
"""Test all examples in docstrings using doctest."""
if not compatibility.PY2:
# TODO(user): Migrate all doctests to Python 3 only once we use Python 3
# in production.
self.skipTest("DocTest is disabled for Python 3 because of unicode string"
" formatting.")
self.assertIsNotNone(self.module, "Set DocTest.module to test docstrings.")
try:
num_failed, num_attempted = doctest.testmod(
self.module, raise_on_error=True)
except doctest.DocTestFailure as e:
name = e.test.name
if "." in name:
name = name.split(".")[-1] # Remove long module prefix.
filename = os.path.basename(e.test.filename)
self.fail("DocTestFailure in {} ({} on line {}):\n"
">>> {}Expected : {}Actual : {}".format(
name, filename, e.test.lineno, e.example.source,
e.example.want, e.got))
# Fail if DocTest is referenced, but no examples in docstrings are present.
self.assertGreater(num_attempted, 0, "No doctests were found!")
# num_failed > 0 should not happen because raise_on_error = True.
self.assertEqual(num_failed, 0, "{} doctests failed.".format(num_failed))
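# A minimal usage sketch (illustrative only; `utils` is one of the modules
# already imported in this file):
#
#   class UtilsDocTest(DocTest):
#     module = utils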
def main(argv=None):
del argv # Unused.
testing_startup.TestInit()
absltest.main()
|
luntan.py
|
import requests
from bs4 import BeautifulSoup
import logging
import os
import queue
import threading
import time
import random
home = 'D:\\luntuan_girls_thread\\'
success_count = 0
count_lock = threading.Lock()
def save_img(url):
try:
global home, success_count
rsp = requests.get(url)
name = url.split('/')[-1]
full_path = "%s%s" % (home, name)
with open(full_path, 'wb') as fh:
fh.write(rsp.content)
with count_lock:
success_count += 1
except Exception as e:
logging.error(e)
def get_all_img(src_link):
logging.info(src_link)
for s in src_link:
save_img(s)
def all_girl_src_list(bs_obj):
key = '小树医生心理生理医务室'
p_list = bs_obj.find_all('p')
girl_img_url_list = []
for p in p_list:
a = p.text
if key in a:
girl_tag = p.find_next_sibling('p')
next_p = girl_tag.find_next('img')
try:
girl_src = next_p['src']
girl_img_url_list.append(girl_src)
break
except Exception as e:
logging.error("except: %s, obj:%s", e, next_p)
logging.info(girl_img_url_list)
return girl_img_url_list
def parse_one(url, header):
logging.info(url)
req_obj = requests.get(url, headers=header)
bs_obj = BeautifulSoup(req_obj.text, 'html.parser')
l = all_girl_src_list(bs_obj)
get_all_img(l)
def parse_one_thread(q, header):
my_name = threading.current_thread().name
while 1:
try:
url = q.get()
if url == 'END':
logging.info('%s:END', my_name)
break
logging.info(url)
req_obj = requests.get(url, headers=header)
bs_obj = BeautifulSoup(req_obj.text, 'html.parser')
l = all_girl_src_list(bs_obj)
get_all_img(l)
time.sleep(0.5)
except Exception as e:
logging.error("e:%s, url:%s", e, url)
time.sleep(5)
def get_all_link(url, header):
global success_count
thread_num = 2
all_img_num = 0
req_obj = requests.get(url, headers=header)
bs_obj = BeautifulSoup(req_obj.text, 'html.parser')
all_a = bs_obj.find_all('a', {'class':'blog-title'})
a_list = []
for a in all_a:
a_list.append(a['href'])
logging.info(a_list)
q_list = [queue.Queue() for i in range(thread_num)]
t_list = [threading.Thread(target=parse_one_thread, args=(q, header)) for q in q_list]
for t in t_list:
t.start()
index = 0
for href in a_list:
all_img_num += 10
q_list[index].put(href)
index += 1
index = index % thread_num
if index == 9:
for t in t_list:
if not t.is_alive():
logging.info("%s dead", t.name)
old_index = t_list.index(t)
old_queue = q_list[old_index]
new_t = threading.Thread(target=parse_one_thread, args=(old_queue, header))
new_t.start()
t_list[old_index] = new_t  # replace the dead thread instead of inserting next to it
for q in q_list:
q.put("END")
for t in t_list:
t.join()
logging.info("all done:should download: %d, success: %d", all_img_num, success_count)
if __name__ == "__main__":
logging.basicConfig(filename='luntan.log', filemode='w', level=logging.DEBUG,
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
header = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36"
}
header_list = [
{"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36 OPR/38.0.2220.41"},
{"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0)"},
{"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1"},
#{"User-Agent": "Googlebot/2.1 (+http://www.google.com/bot.html)"},
]
try:
if not os.path.exists(home):
os.mkdir(home)
except Exception as e:
logging.error(e)
for i in range(100):
header_t = header_list[random.randint(0, len(header_list)-1)]
main_url = 'https://my.oschina.net/xxiaobian/blog/?sort=time&p=%d' % (i+1)
get_all_link(main_url, header_t)
time.sleep(2)
|
progress_bar.py
|
import sys
import time
import math
from collections import deque
from datetime import timedelta
from typing import Optional
from ..logging.profile import TimeContext
from ..helper import colored, get_readable_size, get_readable_time
class ProgressBar(TimeContext):
"""
A simple progress bar.
Example:
.. highlight:: python
.. code-block:: python
with ProgressBar('loop') as bar:
do_busy()
:param task_name: The name of the task, will be displayed in front of the bar.
:param total: Number of steps in the bar. Defaults to 100.
:param bar_len: Total length of the bar.
:param logger: Jina logger
"""
window_size = 5 # average window size
suffix = '%(percent).2f %% eta: %(eta_td)s'
bar_prefix = ' |'
bar_suffix = '| '
empty_fill = ' '
fill = '#'
def __init__(
self,
task_name: str,
total: float = 100.0,
bar_len: int = 32,
logger=None,
**kwargs,
):
super().__init__(task_name, logger)
self.task_name = task_name
self.total = total
self.bar_len = bar_len
self.avg = 0  # running average of seconds per completed step, read by `eta`
self._avg_queue = deque(maxlen=self.window_size)
self._avg_update_ts = None
self._update_ts = None
for key, val in kwargs.items():
setattr(self, key, val)
def __getitem__(self, key):
if key.startswith('_'):
return None
return getattr(self, key, None)
@property
def remaining(self):
"""Return the remaining steps to be completed
:return: the remaining steps
"""
return max(self.total - self.completed, 0)
@property
def elapsed(self):
"""Return the elapsed time
:return: the elapsed seconds
"""
return int(time.perf_counter() - self.start)
@property
def elapsed_td(self) -> 'timedelta':
"""Return the timedelta of elapsed time
:return: the timedelta of elapsed seconds
"""
return timedelta(seconds=self.elapsed)
@property
def eta(self):
"""Return EAT (estimated time of arrival)
:return: return the seconds of ETA
"""
return math.ceil(self.avg * self.remaining)
@property
def eta_td(self) -> 'timedelta':
"""Return the timedelta of ETA
:return: the timedelta of ETA
"""
return timedelta(seconds=self.eta)
@property
def percent(self) -> float:
"""Calculate percentage complete.
:return: the percentage of completed
"""
return self.completed / self.total * 100
def update_avg(self, steps: float, dt: float):
"""Update the average of speed
:param steps: the completed steps
:param dt: the time seconds to use
"""
if steps > 0:
win_len = len(self._avg_queue)
self._avg_queue.append(dt / steps)
now = time.perf_counter()
if win_len < self.window_size or now - self._avg_update_ts > 1:
self.avg = sum(self._avg_queue) / len(self._avg_queue)
self._avg_update_ts = now
def update(
self,
steps: Optional[float] = 1.0,
completed: Optional[float] = None,
total: Optional[float] = None,
suffix_msg: Optional[str] = None,
):
"""Update progress with new values.
:param steps: Number of incremental completed steps.
:param completed: : Number of completed steps.
:param total: Total number of steps, or `None` to not change. Defaults to None.
:param suffix_msg: the suffix message
"""
now = time.perf_counter()
if completed is not None:
steps = max(0, completed - self.completed)
self.completed = completed
else:
self.completed += steps
self.update_avg(steps, now - self._update_ts)
self._update_ts = now
self.total = total if total is not None else self.total
num_bars = int(max(1, self.percent / 100 * self.bar_len))
num_bars = num_bars % self.bar_len
num_bars = self.bar_len if not num_bars and self.completed else max(num_bars, 1)
sys.stdout.write('\r')
suffix = (suffix_msg or self.suffix) % self
line = ''.join(
[
'⏳ {:>10}'.format(colored(self.task_name, 'cyan')),
self.bar_prefix,
colored(self.fill * num_bars, 'green'),
self.empty_fill * (self.bar_len - num_bars),
self.bar_suffix,
suffix,
]
)
sys.stdout.write(line)
# if num_bars == self.bar_len:
# sys.stdout.write('\n')
sys.stdout.flush()
def __enter__(self):
super().__enter__()
self.completed = -1
self._update_ts = self.start
self._avg_update_ts = self.start
self.update()
return self
def _enter_msg(self):
pass
def _exit_msg(self):
sys.stdout.write(
f'\t{colored(f"✅ done in ⏱ {self.readable_duration}", "green")}\n'
)
class ChargingBar(ProgressBar):
"""Charging Bar"""
bar_prefix = ' '
bar_suffix = ' '
empty_fill = '∙'
fill = '█'
class Spinner(TimeContext):
"""Spinner"""
phases = ('-', '\\', '|', '/')
def __enter__(self):
super().__enter__()
self.completed = -1
import threading
self._completed = threading.Event()
self._thread = threading.Thread(target=self.running, daemon=True)
self._thread.start()
return self
def __exit__(self, typ, value, traceback):
super().__exit__(typ, value, traceback)
self._completed.set()
self._thread.join()
def running(self):
"""daemon thread to output spinner"""
while not self._completed.is_set():
self.completed += 1
i = self.completed % len(self.phases)
sys.stdout.write('\r')
line = ' '.join(
['⏳ {:>10}'.format(colored(self.task_name, 'cyan')), self.phases[i]]
)
sys.stdout.write(line)
sys.stdout.flush()
time.sleep(0.5)
def update(self, **kwargs):
"""Update the progress bar
:param kwargs: parameters that can be accepted
"""
pass
def _exit_msg(self):
sys.stdout.write(
f'\t{colored(f"✅ done in ⏱ {self.readable_duration}", "green")}\n'
)
class PieSpinner(Spinner):
"""PieSpinner"""
phases = ['◷', '◶', '◵', '◴']
|